diff --git a/src/diffusers/pipelines/ddim/pipeline_ddim.py b/src/diffusers/pipelines/ddim/pipeline_ddim.py index b9e590dea646..f5f2d404c2b4 100644 --- a/src/diffusers/pipelines/ddim/pipeline_ddim.py +++ b/src/diffusers/pipelines/ddim/pipeline_ddim.py @@ -96,10 +96,10 @@ def __call__( if self.device.type == "mps": # randn does not work reproducibly on mps - image = torch.randn(image_shape, generator=generator) + image = torch.randn(image_shape, generator=generator, dtype=self.unet.dtype) image = image.to(self.device) else: - image = torch.randn(image_shape, generator=generator, device=self.device) + image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(num_inference_steps) diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py index 91fe76444920..99406d85c16b 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion.py @@ -14,7 +14,6 @@ # limitations under the License. import gc -import random import unittest import numpy as np @@ -25,9 +24,9 @@ RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) -from diffusers.utils import floats_tensor, slow, torch_device +from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import XLMRobertaTokenizer +from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from ...test_pipelines_common import PipelineTesterMixin @@ -36,25 +35,11 @@ class AltDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() + pipeline_class = AltDiffusionPipeline - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -64,27 +49,15 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, ) - return model - - @property - def dummy_vae(self): torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -92,84 +65,90 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - @property - def dummy_text_encoder(self): + # TODO: address the non-deterministic text encoder (fails for save-load tests) + # torch.manual_seed(0) + # text_encoder_config = RobertaSeriesConfig( + # hidden_size=32, + # project_dim=32, + # intermediate_size=37, + # layer_norm_eps=1e-05, + # num_attention_heads=4, + # 
num_hidden_layers=5, + # vocab_size=5002, + # ) + # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) + torch.manual_seed(0) - config = RobertaSeriesConfig( + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, hidden_size=32, - project_dim=32, + projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, + pad_token_id=1, vocab_size=5002, ) - return RobertaSeriesModelWithTransformation(config) + text_encoder = CLIPTextModel(text_encoder_config) - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") + tokenizer.model_max_length = 77 - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_alt_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, + components = self.get_dummy_components() + torch.manual_seed(0) + text_encoder_config = RobertaSeriesConfig( + hidden_size=32, + project_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=5002, ) + # TODO: remove after fixing the non-deterministic text encoder + text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) + components["text_encoder"] = text_encoder + + alt_pipe = AltDiffusionPipeline(**components) alt_pipe = alt_pipe.to(device) alt_pipe.set_progress_bar_config(disable=None) - prompt = "A photo of an astronaut" - - generator = torch.Generator(device=device).manual_seed(0) - output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = "A photo of an astronaut" + output = alt_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ 
-177,89 +156,39 @@ def test_alt_diffusion_ddim(self): ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_alt_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + text_encoder_config = RobertaSeriesConfig( + hidden_size=32, + project_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=5002, ) + # TODO: remove after fixing the non-deterministic text encoder + text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) + components["text_encoder"] = text_encoder + alt_pipe = AltDiffusionPipeline(**components) alt_pipe = alt_pipe.to(device) alt_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + inputs = self.get_dummy_inputs(device) + output = alt_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_alt_diffusion_fp16(self): - """Test that stable diffusion works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - alt_pipe = alt_pipe.to(torch_device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images - - assert image.shape == (1, 64, 64, 3) @slow diff --git 
a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py index 434e55f946b6..761b2c013401 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py @@ -29,13 +29,11 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import XLMRobertaTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class AltDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/audio_diffusion/__init__.py b/tests/pipelines/audio_diffusion/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/pipelines/dance_diffusion/__init__.py b/tests/pipelines/dance_diffusion/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index a63ef84c63f5..5b96e7f17508 100644 --- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py @@ -23,21 +23,20 @@ from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu +from ...test_pipelines_common import PipelineTesterMixin + torch.backends.cuda.matmul.allow_tf32 = False -class PipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() +class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = DanceDiffusionPipeline + test_attention_slicing = False + test_cpu_offload = False - @property - def dummy_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet1DModel( + unet = UNet1DModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, @@ -48,34 +47,44 @@ def dummy_unet(self): use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", - down_block_types=["DownBlock1DNoSkip"] + ["DownBlock1D"] + ["AttnDownBlock1D"], - up_block_types=["AttnUpBlock1D"] + ["UpBlock1D"] + ["UpBlock1DNoSkip"], + down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), + up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), ) - return model + scheduler = IPNDMScheduler() + + components = { + "unet": unet, + "scheduler": scheduler, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "generator": generator, + "num_inference_steps": 4, + } + return inputs def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - scheduler = IPNDMScheduler() - - pipe = DanceDiffusionPipeline(unet=self.dummy_unet, scheduler=scheduler) + components = self.get_dummy_components() + pipe = DanceDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = pipe(generator=generator, num_inference_steps=4) + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs) 
audio = output.audios - generator = torch.Generator(device=device).manual_seed(0) - output = pipe(generator=generator, num_inference_steps=4, return_dict=False) - audio_from_tuple = output[0] - audio_slice = audio[0, -3:, -3:] - audio_from_tuple_slice = audio_from_tuple[0, -3:, -3:] - assert audio.shape == (1, 2, self.dummy_unet.sample_size) + assert audio.shape == (1, 2, components["unet"].sample_size) expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(audio_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py index 2d03383599e0..26ea10caeabd 100644 --- a/tests/pipelines/ddim/test_ddim.py +++ b/tests/pipelines/ddim/test_ddim.py @@ -28,10 +28,11 @@ class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - @property - def dummy_uncond_unet(self): + pipeline_class = DDIMPipeline + + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DModel( + unet = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -40,32 +41,40 @@ def dummy_uncond_unet(self): down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) - return model + scheduler = DDIMScheduler() + components = {"unet": unet, "scheduler": scheduler} + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "generator": generator, + "num_inference_steps": 2, + "output_type": "numpy", + } + return inputs def test_inference(self): device = "cpu" - unet = self.dummy_uncond_unet - scheduler = DDIMScheduler() - ddpm = DDIMPipeline(unet=unet, scheduler=scheduler) - ddpm.to(device) - ddpm.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=device).manual_seed(0) - image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0] + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - assert image.shape == (1, 32, 32, 3) + self.assertEqual(image.shape, (1, 32, 32, 3)) expected_slice = np.array( [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) @slow diff --git a/tests/pipelines/ddpm/test_ddpm.py b/tests/pipelines/ddpm/test_ddpm.py index 0f7ca86273e0..7d51cb37e064 100644 --- a/tests/pipelines/ddpm/test_ddpm.py +++ b/tests/pipelines/ddpm/test_ddpm.py @@ -22,13 +22,11 @@ from diffusers.utils import deprecate from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class 
DDPMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class DDPMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/karras_ve/test_karras_ve.py b/tests/pipelines/karras_ve/test_karras_ve.py index 1fafa1cb40fa..9806e8bf373e 100644 --- a/tests/pipelines/karras_ve/test_karras_ve.py +++ b/tests/pipelines/karras_ve/test_karras_ve.py @@ -21,13 +21,11 @@ from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel from diffusers.utils.testing_utils import require_torch, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class KarrasVePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class KarrasVePipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py index 9d5c07809dc0..ad8d77807294 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py @@ -22,13 +22,11 @@ from diffusers.utils.testing_utils import require_torch, slow, torch_device from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class LDMTextToImagePipelineFastTests(unittest.TestCase): @property def dummy_cond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py index d7992c2a43ab..89356f85728f 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -23,13 +23,11 @@ from diffusers.utils import PIL_INTERPOLATION, floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import require_torch -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class LDMSuperResolutionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class LDMSuperResolutionPipelineFastTests(unittest.TestCase): @property def dummy_image(self): batch_size = 1 diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py index f063d6759e9b..39ad12254e09 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py @@ -22,13 +22,11 @@ from diffusers.utils.testing_utils import require_torch, slow, torch_device from transformers import CLIPTextConfig, CLIPTextModel -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class LDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class LDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/pndm/test_pndm.py b/tests/pipelines/pndm/test_pndm.py index 5d9212223e6e..8851607ccc07 100644 --- a/tests/pipelines/pndm/test_pndm.py +++ b/tests/pipelines/pndm/test_pndm.py @@ -21,13 +21,11 @@ from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel from 
diffusers.utils.testing_utils import require_torch, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class PNDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class PNDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/score_sde_ve/test_score_sde_ve.py b/tests/pipelines/score_sde_ve/test_score_sde_ve.py index 9cdf3f0191e1..4379bd146f9b 100644 --- a/tests/pipelines/score_sde_ve/test_score_sde_ve.py +++ b/tests/pipelines/score_sde_ve/test_score_sde_ve.py @@ -21,13 +21,11 @@ from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel from diffusers.utils.testing_utils import require_torch, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class ScoreSdeVeipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class ScoreSdeVeipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py index 33157ed9ad30..ae0a13936603 100644 --- a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py @@ -20,7 +20,7 @@ import numpy as np import torch -from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel, UNet2DModel, VQModel +from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer @@ -32,39 +32,11 @@ class CycleDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) + pipeline_class = CycleDiffusionPipeline - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_uncond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - @property - def dummy_cond_unet(self): - torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -74,40 +46,16 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - 
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + num_train_timesteps=1000, + clip_sample=False, + set_alpha_to_one=False, ) - return model - - @property - def dummy_vae(self): torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -115,12 +63,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -131,68 +75,50 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "An astronaut riding an elephant", + "source_prompt": "An astronaut riding a horse", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "eta": 0.1, + "strength": 0.8, + "guidance_scale": 3, + "source_guidance_scale": 1, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_cycle(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = CycleDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - source_prompt = "An astronaut riding a horse" - prompt = "An astronaut riding an elephant" - init_image = self.dummy_image.to(device) + components = self.get_dummy_components() + pipe = CycleDiffusionPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt=prompt, - source_prompt=source_prompt, - generator=generator, - num_inference_steps=2, - image=init_image, - eta=0.1, - strength=0.8, - guidance_scale=3, - source_guidance_scale=1, - output_type="np", - ) + inputs = self.get_dummy_inputs(device) + output = 
pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] @@ -204,53 +130,16 @@ def test_stable_diffusion_cycle(self): @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_cycle_fp16(self): - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = CycleDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - source_prompt = "An astronaut riding a horse" - prompt = "An astronaut riding an elephant" - init_image = self.dummy_image.to(torch_device) + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.half() + pipe = CycleDiffusionPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=torch_device).manual_seed(0) - output = sd_pipe( - prompt=prompt, - source_prompt=source_prompt, - generator=generator, - num_inference_steps=2, - image=init_image, - eta=0.1, - strength=0.8, - guidance_scale=3, - source_guidance_scale=1, - output_type="np", - ) + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index 8dce61c3a456..4748a41feae7 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -14,7 +14,6 @@ # limitations under the License. 
import gc -import random import tempfile import time import unittest @@ -31,11 +30,9 @@ PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, - UNet2DModel, - VQModel, logging, ) -from diffusers.utils import floats_tensor, load_numpy, slow, torch_device +from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer @@ -46,39 +43,11 @@ class StableDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model + pipeline_class = StableDiffusionPipeline - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -88,40 +57,15 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, ) - return model - - @property - def dummy_vae(self): torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -129,12 +73,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -145,128 +85,63 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + 
"tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) image = output.images - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [ - 0.5643956661224365, - 0.6017904281616211, - 0.4799129366874695, - 0.5267305374145508, - 0.5584856271743774, - 0.46413588523864746, - 0.5159522294998169, - 0.4963662028312683, - 0.47919973731040955, - ] - ) + expected_slice = np.array([0.5643, 0.6017, 0.4799, 0.5267, 0.5584, 0.4641, 0.5159, 0.4963, 0.4791]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - 
guidance_scale=6.0, - height=136, - width=136, - num_inference_steps=2, - output_type="np", - ) + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs, height=136, width=136) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -278,60 +153,20 @@ def test_stable_diffusion_ddim_factor_8(self): def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [ - 0.5094760060310364, - 0.5674174427986145, - 0.46675148606300354, - 0.5125715136528015, - 0.5696930289268494, - 0.4674668312072754, - 0.5277683734893799, - 0.4964486062526703, - 0.494540274143219, - ] - ) + expected_slice = np.array([0.5094, 0.5674, 0.4667, 0.5125, 0.5696, 0.4674, 0.5277, 0.4964, 0.4945]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( @@ -356,43 +191,17 @@ def test_stable_diffusion_no_safety_checker(self): def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, 
output_type="np") - + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -409,47 +218,20 @@ def test_stable_diffusion_k_lms(self): ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -466,47 +248,20 @@ def test_stable_diffusion_k_euler_ancestral(self): ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = 
torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -523,112 +278,41 @@ def test_stable_diffusion_k_euler(self): ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_attention_chunk(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - # make sure chunking the attention yields the same result - sd_pipe.enable_attention_slicing(slice_size=1) - generator = torch.Generator(device=device).manual_seed(0) - output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4 def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - image_count = 4 - generator = torch.Generator(device=device).manual_seed(0) - output_1 = sd_pipe( - [prompt] * image_count, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np" - ) + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + output_1 = sd_pipe(**inputs) # make sure sliced vae decode yields the same result 
sd_pipe.enable_vae_slicing() - generator = torch.Generator(device=device).manual_seed(0) - output_2 = sd_pipe( - [prompt] * image_count, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np" - ) + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + output_2 = sd_pipe(**inputs) # there is a small discrepancy at image borders vs. full batch decode assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" + inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt, - negative_prompt=negative_prompt, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - ) + output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -651,22 +335,9 @@ def test_stable_diffusion_negative_prompt(self): def test_stable_diffusion_num_images_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) @@ -699,56 +370,10 @@ def test_stable_diffusion_num_images_per_prompt(self): assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3) - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_fp16(self): - """Test that stable diffusion works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) 
- sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images - - assert image.shape == (1, 64, 64, 3) - def test_stable_diffusion_long_prompt(self): - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) @@ -784,22 +409,9 @@ def test_stable_diffusion_long_prompt(self): assert cap_logger_3.out == "" def test_stable_diffusion_height_width_opt(self): - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py index 90bfef5efeea..4be95322d6b6 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py @@ -38,25 +38,11 @@ class StableDiffusionImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) + pipeline_class = StableDiffusionImageVariationPipeline - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -66,12 +52,9 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, 
out_channels=3, @@ -79,12 +62,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_image_encoder(self): torch.manual_seed(0) - config = CLIPVisionConfig( + image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, @@ -94,102 +73,58 @@ def dummy_image_encoder(self): image_size=32, patch_size=4, ) - return CLIPVisionModelWithProjection(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "image_encoder": image_encoder, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_img_variation_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - init_image, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - init_image, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5093, 0.5717, 0.4806, 0.4891, 0.5552, 0.4594, 0.5177, 0.4894, 0.4904]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img_variation_multiple_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(device).repeat(2, 1, 1, 1) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - 
scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - init_image, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - ) + inputs = self.get_dummy_inputs(device) + inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) + output = sd_pipe(**inputs) image = output.images @@ -201,103 +136,40 @@ def test_stable_diffusion_img_variation_multiple_images(self): def test_stable_diffusion_img_variation_num_images_per_prompt(self): device = "cpu" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test num_images_per_prompt=1 (default) - images = sd_pipe( - init_image, - num_inference_steps=2, - output_type="np", - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs).images assert images.shape == (1, 64, 64, 3) # test num_images_per_prompt=1 (default) for batch of images batch_size = 2 - images = sd_pipe( - init_image.repeat(batch_size, 1, 1, 1), - num_inference_steps=2, - output_type="np", - ).images + inputs = self.get_dummy_inputs(device) + inputs["image"] = inputs["image"].repeat(batch_size, 1, 1, 1) + images = sd_pipe(**inputs).images assert images.shape == (batch_size, 64, 64, 3) # test num_images_per_prompt for single prompt num_images_per_prompt = 2 - images = sd_pipe( - init_image, - num_inference_steps=2, - output_type="np", - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (num_images_per_prompt, 64, 64, 3) # test num_images_per_prompt for batch of prompts batch_size = 2 - images = sd_pipe( - init_image.repeat(batch_size, 1, 1, 1), - num_inference_steps=2, - output_type="np", - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + inputs["image"] = inputs["image"].repeat(batch_size, 1, 1, 1) + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3) - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img_variation_fp16(self): - """Test that stable diffusion img2img works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(torch_device).float() - - # put models in fp16 - unet = unet.half() - vae = vae.half() - image_encoder = image_encoder.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - 
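The batched image-variation tests above reuse the single fixture image and tile it with `repeat`; the shape asserts then follow from `batch_size * num_images_per_prompt`. A small sketch of that bookkeeping with plain tensors, no pipeline involved:

    import torch

    image = torch.rand(1, 3, 32, 32)    # one dummy image, NCHW
    batch = image.repeat(2, 1, 1, 1)    # two identical copies along the batch dim
    assert batch.shape == (2, 3, 32, 32)
    # with num_images_per_prompt=2 as well, the pipeline is expected to
    # return batch_size * num_images_per_prompt == 4 images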
scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - init_image, - generator=generator, - num_inference_steps=2, - output_type="np", - ).images - - assert image.shape == (1, 64, 64, 3) - @slow @require_torch_gpu diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py index 4d8247195504..7ce06403fa05 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py @@ -27,8 +27,6 @@ PNDMScheduler, StableDiffusionImg2ImgPipeline, UNet2DConditionModel, - UNet2DModel, - VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu @@ -41,39 +39,11 @@ class StableDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model + pipeline_class = StableDiffusionImg2ImgPipeline - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -83,40 +53,9 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, - ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -124,12 +63,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -140,113 +75,61 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return 
CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_img2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4492, 0.3865, 0.4222, 0.5854, 0.5139, 0.4379, 0.4193, 0.48, 0.4218]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - 
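The img2img asserts above expect (1, 32, 32, 3) rather than the (1, 64, 64, 3) of the text-to-image tests because img2img inherits the resolution of the 32x32 init image, while text-to-image derives its default size from the unet sample size and the VAE scale factor. A sketch of that arithmetic, assuming diffusers' usual convention of `vae_scale_factor = 2 ** (len(block_out_channels) - 1)`:

    block_out_channels = [32, 64]   # from the dummy AutoencoderKL above
    vae_scale_factor = 2 ** (len(block_out_channels) - 1)
    sample_size = 32                # from the dummy UNet2DConditionModel
    assert sample_size * vae_scale_factor == 64   # text-to-image default H/W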
feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" + inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt, - negative_prompt=negative_prompt, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) + output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -256,40 +139,15 @@ def test_stable_diffusion_img2img_negative_prompt(self): def test_stable_diffusion_img2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device).repeat(2, 1, 1, 1) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = 2 * ["A painting of a squirrel eating a burger"] - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - - image = output.images - + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * 2 + inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) + image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) @@ -298,171 +156,58 @@ def test_stable_diffusion_img2img_multiple_init_images(self): def test_stable_diffusion_img2img_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - image = output.images - - generator = 
torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - return_dict=False, - ) - image_from_tuple = output[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4367, 0.4986, 0.4372, 0.6706, 0.5665, 0.444, 0.5864, 0.6019, 0.5203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_num_images_per_prompt(self): - device = "cpu" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - # test num_images_per_prompt=1 (default) - images = sd_pipe( - prompt, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs).images assert images.shape == (1, 32, 32, 3) # test num_images_per_prompt=1 (default) for batch of prompts batch_size = 2 - images = sd_pipe( - [prompt] * batch_size, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * batch_size + images = sd_pipe(**inputs).images assert images.shape == (batch_size, 32, 32, 3) # test num_images_per_prompt for single prompt num_images_per_prompt = 2 - images = sd_pipe( - prompt, - num_inference_steps=2, - output_type="np", - image=init_image, - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (num_images_per_prompt, 32, 32, 3) # test num_images_per_prompt for batch of prompts batch_size = 2 - images = sd_pipe( - [prompt] * batch_size, - num_inference_steps=2, - output_type="np", - image=init_image, - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * batch_size + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3) - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img2img_fp16(self): - """Test that stable diffusion img2img works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = 
self.dummy_image.to(torch_device) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images - - assert image.shape == (1, 32, 32, 3) - @slow @require_torch_gpu diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py index 6e6780d47ed6..f331209e64f4 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py @@ -26,8 +26,6 @@ PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, - UNet2DModel, - VQModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device @@ -42,54 +40,11 @@ class StableDiffusionInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image + pipeline_class = StableDiffusionInpaintPipeline - @property - def dummy_uncond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - @property - def dummy_cond_unet(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -99,25 +54,9 @@ def dummy_cond_unet_inpaint(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, - ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -125,12 +64,8 @@ def dummy_vae(self): 
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -141,226 +76,89 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract - - def test_stable_diffusion_inpaint(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder + text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4723, 0.5731, 0.3939, 0.5441, 0.5922, 0.4392, 0.5059, 0.4651, 
0.4474]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_image_tensor(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.repeat(1, 1, 2, 2) - mask_image = image / 2 - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=image, - mask_image=mask_image[:, 0], - ) - out_1 = output.images + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + out_pil = output.images - image = image.cpu().permute(0, 2, 3, 1)[0] - mask_image = mask_image.cpu().permute(0, 2, 3, 1)[0] + inputs = self.get_dummy_inputs(device) + inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) + inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) + output = sd_pipe(**inputs) + out_tensor = output.images - image = Image.fromarray(np.uint8(image)).convert("RGB") - mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB") - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=image, - mask_image=mask_image, - ) - out_2 = output.images - - assert out_1.shape == (1, 64, 64, 3) - assert np.abs(out_1.flatten() - out_2.flatten()).max() < 5e-2 + assert out_pil.shape == (1, 64, 64, 3) + assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 def test_stable_diffusion_inpaint_with_num_images_per_prompt(self): device = "cpu" - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - images = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - 
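The tensor branch of the inpaint test above encodes two normalization conventions: images map to [-1, 1] via `/ 127.5 - 1`, masks map to [0, 1] via `/ 255` and keep a single channel. A self-contained sketch of just those conversions (synthetic PIL inputs, not the test fixtures):

    import numpy as np
    import torch
    from PIL import Image

    pil_image = Image.new("RGB", (64, 64), color=(128, 128, 128))
    pil_mask = Image.new("RGB", (64, 64), color=(255, 255, 255))

    # image: [-1, 1], NCHW with batch dim; mask: [0, 1], single channel
    image = torch.tensor(np.array(pil_image) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0)
    mask = torch.tensor(np.array(pil_mask) / 255).permute(2, 0, 1)[:1].unsqueeze(0)
    assert image.shape == (1, 3, 64, 64)
    assert mask.shape == (1, 1, 64, 64)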
num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - num_images_per_prompt=2, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs, num_images_per_prompt=2).images # check if the output is a list of 2 images assert len(images) == 2 - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_inpaint_fp16(self): - """Test that stable diffusion inpaint_legacy works with fp16""" - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ).images - - assert image.shape == (1, 64, 64, 3) - @slow @require_torch_gpu diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py index 95ad79a25748..9207fa7fd4e4 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py @@ -35,13 +35,11 @@ from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusionInpaintLegacyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class StableDiffusionInpaintLegacyPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py index 0b3b69c2c24b..5f8644754204 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py @@ -14,7 +14,6 @@ # limitations under the License. 
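Throughout this patch the duplicated `return_dict=False` comparisons are deleted from individual tests; the `PipelineTesterMixin` docstring (end of this diff) lists "equivalence of dict and tuple outputs" among the properties it checks once for all pipelines. An illustrative sketch of such a shared check, built only from the helpers this patch introduces (not the mixin's actual body, which is truncated in this excerpt):

    import numpy as np

    def test_dict_tuple_outputs_equivalent(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to("cpu")
        pipe.set_progress_bar_config(disable=None)
        # identical seeds -> identical latents, so the two calls must agree
        images = pipe(**self.get_dummy_inputs("cpu")).images
        images_tuple = pipe(**self.get_dummy_inputs("cpu"), return_dict=False)[0]
        assert np.abs(images - images_tuple).max() < 1e-4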
import gc -import tempfile import unittest import numpy as np @@ -42,16 +41,11 @@ class StableDiffusion2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() + pipeline_class = StableDiffusionPipeline - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -64,12 +58,15 @@ def dummy_cond_unet(self): attention_head_dim=(2, 4, 8, 8), use_linear_projection=True, ) - return model - - @property - def dummy_vae(self): + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -78,12 +75,8 @@ def dummy_vae(self): latent_channels=4, sample_size=128, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -97,378 +90,118 @@ def dummy_text_encoder(self): hidden_act="gelu", projection_dim=512, ) - return CLIPTextModel(config) - - def test_save_pretrained_from_pretrained(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - - vae = self.dummy_vae - bert = self.dummy_text_encoder + text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - image = output.images - - with tempfile.TemporaryDirectory() as tmpdirname: - sd_pipe.save_pretrained(tmpdirname) - sd_pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) - sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - new_image = output.images - - assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + 
"output_type": "numpy", + } + return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5649, 0.6022, 0.4804, 0.5270, 0.5585, 0.4643, 0.5159, 0.4963, 0.4793]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5099, 0.5677, 0.4671, 0.5128, 0.5697, 0.4676, 0.5277, 0.4964, 0.4946]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert 
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4717, 0.5376, 0.4568, 0.5225, 0.5734, 0.4797, 0.5467, 0.5074, 0.5043]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) 
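The scheduler-swapping tests above no longer restate beta parameters: `from_config` builds a different scheduler class from the current scheduler's config dict, so `beta_start`, `beta_end`, and `beta_schedule` carry over from the DDIM default. The idiom in isolation, assuming the dummy components defined earlier:

    from diffusers import LMSDiscreteScheduler

    components = self.get_dummy_components()   # scheduler: the DDIM default
    # keep the scaled_linear beta schedule, change only the solver
    components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
    sd_pipe = self.pipeline_class(**components)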
expected_slice = np.array([0.4715, 0.5376, 0.4569, 0.5224, 0.5734, 0.4797, 0.5465, 0.5074, 0.5046]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4717, 0.5376, 0.4568, 0.5225, 0.5734, 0.4797, 0.5467, 0.5074, 0.5043]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_attention_chunk(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - # make sure chunking the attention yields the same result - sd_pipe.enable_attention_slicing(slice_size=1) - generator = torch.Generator(device=device).manual_seed(0) - output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def 
test_stable_diffusion_fp16(self): - """Test that stable diffusion works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images - - assert image.shape == (1, 64, 64, 3) def test_stable_diffusion_long_prompt(self): - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py index b420570f0707..b2d387cb6890 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py @@ -22,7 +22,7 @@ from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device -from diffusers.utils.testing_utils import require_torch_gpu +from diffusers.utils.testing_utils import require_torch_gpu, slow from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer @@ -32,26 +32,12 @@ torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusionInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image +class StableDiffusion2InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableDiffusionInpaintPipeline - @property - def dummy_cond_unet_inpaint(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -64,25 +50,19 @@ def dummy_cond_unet_inpaint(self): attention_head_dim=(2, 4, 8, 
8), use_linear_projection=True, ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, + sample_size=128, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -96,129 +76,59 @@ def dummy_text_encoder(self): hidden_act="gelu", projection_dim=512, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract - - def test_stable_diffusion_inpaint(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder + text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - 
image=init_image, - mask_image=mask_image, - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_inpaint_fp16(self): - """Test that stable diffusion inpaint works with fp16""" - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - text_encoder = text_encoder.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ).images - - assert image.shape == (1, 64, 64, 3) -# @slow +@slow @require_torch_gpu class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py index ddb9b3358a84..0eb43b570b6b 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -26,13 +26,11 @@ from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusionUpscalePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class StableDiffusionUpscalePipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py index bbe4f4943697..5fe973b2ac14 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -32,13 +32,11 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusion2VPredictionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class 
StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py index dbb991479377..aa143bd63b96 100644 --- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py +++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py @@ -27,13 +27,11 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class SafeDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class SafeDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py index 9fb6ca522f5d..b8595378a58a 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py @@ -23,13 +23,11 @@ from diffusers import VersatileDiffusionDualGuidedPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionDualGuidedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionDualGuidedPipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py index 1711b752992f..4c8bef0e71b3 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py @@ -21,13 +21,11 @@ from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py index 31085aeb614d..3670894ed7d6 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py @@ -23,13 +23,11 @@ from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionMegaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py index 
027819efee9f..44514da0108b 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py @@ -23,13 +23,11 @@ from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/vq_diffusion/test_vq_diffusion.py b/tests/pipelines/vq_diffusion/test_vq_diffusion.py index 87e29cbc97de..d992a5215298 100644 --- a/tests/pipelines/vq_diffusion/test_vq_diffusion.py +++ b/tests/pipelines/vq_diffusion/test_vq_diffusion.py @@ -25,13 +25,11 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False -class VQDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VQDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py index 072fe2fe76a9..f328a4400360 100644 --- a/tests/test_pipelines.py +++ b/tests/test_pipelines.py @@ -57,29 +57,6 @@ torch.backends.cuda.matmul.allow_tf32 = False -def test_progress_bar(capsys): - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - scheduler = DDPMScheduler(num_train_timesteps=10) - - ddpm = DDPMPipeline(model, scheduler).to(torch_device) - ddpm(output_type="numpy").images - captured = capsys.readouterr() - assert "10/10" in captured.err, "Progress bar has to be displayed" - - ddpm.set_progress_bar_config(disable=True) - ddpm(output_type="numpy").images - captured = capsys.readouterr() - assert captured.err == "", "Progress bar should be disabled" - - class DownloadTests(unittest.TestCase): def test_download_only_pytorch(self): with tempfile.TemporaryDirectory() as tmpdirname: @@ -108,7 +85,7 @@ def test_returned_cached_folder(self): pipe_2 = StableDiffusionPipeline.from_pretrained(local_path) pipe = pipe.to(torch_device) - pipe_2 = pipe.to(torch_device) + pipe_2 = pipe_2.to(torch_device) if torch_device == "mps": # device type MPS is not supported for torch.Generator() api. 
generator = torch.manual_seed(0) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index bf99925f1c4f..f18c939e1504 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -1,4 +1,22 @@ -from diffusers.utils.testing_utils import require_torch +import contextlib +import gc +import inspect +import io +import re +import tempfile +import time +import unittest +from typing import Callable, Union + +import numpy as np +import torch + +from diffusers import CycleDiffusionPipeline, DanceDiffusionPipeline, DiffusionPipeline, StableDiffusionImg2ImgPipeline +from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available +from diffusers.utils.testing_utils import require_torch, torch_device + + +torch.backends.cuda.matmul.allow_tf32 = False @require_torch @@ -9,4 +27,347 @@ class PipelineTesterMixin: equivalence of dict and tuple outputs, etc. """ - pass + # set these parameters to False in the child class if the pipeline does not support the corresponding functionality + test_attention_slicing = True + test_cpu_offload = True + test_xformers_attention = True + + @property + def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: + raise NotImplementedError( + "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " + "See existing pipeline tests for reference." + ) + + def get_dummy_components(self): + raise NotImplementedError( + "You need to implement `get_dummy_components(self)` in the child test class. " + "See existing pipeline tests for reference." + ) + + def get_dummy_inputs(self, device, seed=0): + raise NotImplementedError( + "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " + "See existing pipeline tests for reference." 
+        )
+
+    def tearDown(self):
+        # clean up the VRAM after each test in case of CUDA runtime errors
+        super().tearDown()
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    def test_save_load_local(self):
+        if torch_device == "mps" and self.pipeline_class in (
+            DanceDiffusionPipeline,
+            CycleDiffusionPipeline,
+            StableDiffusionImg2ImgPipeline,
+        ):
+            # FIXME: inconsistent outputs on MPS
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # Warmup pass when using mps (see #372)
+        if torch_device == "mps":
+            _ = pipe(**self.get_dummy_inputs(torch_device))
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output = pipe(**inputs)[0]
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            pipe.save_pretrained(tmpdir)
+            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+            pipe_loaded.to(torch_device)
+            pipe_loaded.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_loaded = pipe_loaded(**inputs)[0]
+
+        max_diff = np.abs(output - output_loaded).max()
+        self.assertLess(max_diff, 1e-5)
+
+    def test_dict_tuple_outputs_equivalent(self):
+        if torch_device == "mps" and self.pipeline_class in (
+            DanceDiffusionPipeline,
+            CycleDiffusionPipeline,
+            StableDiffusionImg2ImgPipeline,
+        ):
+            # FIXME: inconsistent outputs on MPS
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # Warmup pass when using mps (see #372)
+        if torch_device == "mps":
+            _ = pipe(**self.get_dummy_inputs(torch_device))
+
+        output = pipe(**self.get_dummy_inputs(torch_device))[0]
+        output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0]
+
+        max_diff = np.abs(output - output_tuple).max()
+        self.assertLess(max_diff, 1e-5)
+
+    def test_pipeline_call_implements_required_args(self):
+        required_args = ["num_inference_steps", "generator", "return_dict"]
+
+        for arg in required_args:
+            self.assertTrue(arg in inspect.signature(self.pipeline_class.__call__).parameters)
+
+    def test_num_inference_steps_consistent(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # Warmup pass on every device, so that first-call overhead (e.g. mps compilation, see #372)
+        # does not distort the timing comparison below
+        _ = pipe(**self.get_dummy_inputs(torch_device))
+
+        outputs = []
+        times = []
+        for num_steps in [3, 6, 9]:
+            inputs = self.get_dummy_inputs(torch_device)
+            inputs["num_inference_steps"] = num_steps
+
+            start_time = time.time()
+            output = pipe(**inputs)[0]
+            inference_time = time.time() - start_time
+
+            outputs.append(output)
+            times.append(inference_time)
+
+        # check that all outputs have the same shape
+        self.assertTrue(all(outputs[0].shape == output.shape for output in outputs))
+        # check that the inference time increases with the number of inference steps
+        self.assertTrue(all(times[i] > times[i - 1] for i in range(1, len(times))))
+
+    def test_components_function(self):
+        init_components = self.get_dummy_components()
+        pipe = self.pipeline_class(**init_components)
+
+        self.assertTrue(hasattr(pipe, "components"))
+        self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))
+
+    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    def test_float16_inference(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # `nn.Module.half()` converts weights in place, so build the fp16 pipeline from a fresh,
+        # identically seeded set of components instead of sharing (now-halved) modules with `pipe`
+        components = self.get_dummy_components()
+        for name, module in components.items():
+            if hasattr(module, "half"):
+                components[name] = module.half()
+        pipe_fp16 = self.pipeline_class(**components)
+        pipe_fp16.to(torch_device)
+        pipe_fp16.set_progress_bar_config(disable=None)
+
+        output = pipe(**self.get_dummy_inputs(torch_device))[0]
+        output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0]
+
+        max_diff = np.abs(output - output_fp16).max()
+        self.assertLess(max_diff, 1e-2, "The outputs of the fp16 and fp32 pipelines are too different.")
+
+    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
+    def test_save_load_float16(self):
+        components = self.get_dummy_components()
+        for name, module in components.items():
+            if hasattr(module, "half"):
+                components[name] = module.to(torch_device).half()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output = pipe(**inputs)[0]
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            pipe.save_pretrained(tmpdir)
+            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+            pipe_loaded.to(torch_device)
+            pipe_loaded.set_progress_bar_config(disable=None)
+
+        for name, component in pipe_loaded.components.items():
+            if hasattr(component, "dtype"):
+                self.assertTrue(
+                    component.dtype == torch.float16,
+                    f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.",
+                )
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_loaded = pipe_loaded(**inputs)[0]
+
+        max_diff = np.abs(output - output_loaded).max()
+        self.assertLess(max_diff, 3e-3, "The output of the fp16 pipeline changed after saving and loading.")
+
+    def test_save_load_optional_components(self):
+        if not hasattr(self.pipeline_class, "_optional_components"):
+            return
+
+        if torch_device == "mps" and self.pipeline_class in (
+            DanceDiffusionPipeline,
+            CycleDiffusionPipeline,
+            StableDiffusionImg2ImgPipeline,
+        ):
+            # FIXME: inconsistent outputs on MPS
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # Warmup pass when using mps (see #372)
+        if torch_device == "mps":
+            _ = pipe(**self.get_dummy_inputs(torch_device))
+
+        # set all optional components to None
+        for optional_component in pipe._optional_components:
+            setattr(pipe, optional_component, None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output = pipe(**inputs)[0]
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            pipe.save_pretrained(tmpdir)
+            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
+            pipe_loaded.to(torch_device)
+            pipe_loaded.set_progress_bar_config(disable=None)
+
+        for optional_component in pipe._optional_components:
+            self.assertTrue(
+                getattr(pipe_loaded, optional_component) is None,
+                f"`{optional_component}` did not stay set to None after loading.",
+            )
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_loaded = pipe_loaded(**inputs)[0]
+
+        max_diff = np.abs(output - output_loaded).max()
+        self.assertLess(max_diff, 1e-5)
+
+    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    def test_to_device(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.set_progress_bar_config(disable=None)
+
+        pipe.to("cpu")
+        model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
+        self.assertTrue(all(device == "cpu" for device in model_devices))
+
+        output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
+        self.assertTrue(np.isnan(output_cpu).sum() == 0)
+
+        pipe.to("cuda")
+        model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
+        self.assertTrue(all(device == "cuda" for device in model_devices))
+
+        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        self.assertTrue(np.isnan(output_cuda).sum() == 0)
+
+    def test_attention_slicing_forward_pass(self):
+        if not self.test_attention_slicing:
+            return
+
+        if torch_device == "mps" and self.pipeline_class in (
+            DanceDiffusionPipeline,
+            CycleDiffusionPipeline,
+            StableDiffusionImg2ImgPipeline,
+        ):
+            # FIXME: inconsistent outputs on MPS
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        # Warmup pass when using mps (see #372)
+        if torch_device == "mps":
+            _ = pipe(**self.get_dummy_inputs(torch_device))
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_without_slicing = pipe(**inputs)[0]
+
+        pipe.enable_attention_slicing(slice_size=1)
+        inputs = self.get_dummy_inputs(torch_device)
+        output_with_slicing = pipe(**inputs)[0]
+
+        max_diff = np.abs(output_with_slicing - output_without_slicing).max()
+        self.assertLess(max_diff, 1e-5, "Attention slicing should not affect the inference results")
+
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_accelerate_available(),
+        reason="CPU offload is only available with CUDA and `accelerate` installed",
+    )
+    def test_cpu_offload_forward_pass(self):
+        if not self.test_cpu_offload:
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_without_offload = pipe(**inputs)[0]
+
+        pipe.enable_sequential_cpu_offload()
+        inputs = self.get_dummy_inputs(torch_device)
+        output_with_offload = pipe(**inputs)[0]
+
+        max_diff = np.abs(output_with_offload - output_without_offload).max()
+        self.assertLess(max_diff, 1e-5, "CPU offloading should not affect the inference results")
+
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forward_pass(self):
+        if not self.test_xformers_attention:
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_without_xformers = pipe(**inputs)[0]
+
+        pipe.enable_xformers_memory_efficient_attention()
+        inputs = self.get_dummy_inputs(torch_device)
+        output_with_xformers = pipe(**inputs)[0]
+
+        max_diff = np.abs(output_with_xformers - output_without_xformers).max()
+        self.assertLess(max_diff, 1e-5, "XFormers attention should not affect the inference results")
+
+    def test_progress_bar(self):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe.to(torch_device)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
+            _ = pipe(**inputs)
+            stderr = stderr.getvalue()
+            # we can't calculate the number of progress steps beforehand, e.g. for strength-dependent img2img,
+            # so we just match the "5" in "#####| 1/5 [00:01<00:00]"
+            match = re.search("/(.*?) ", stderr)
+            self.assertTrue(match is not None, "Progress bar should print the total number of steps")
+            max_steps = match.group(1)
+            self.assertTrue(len(max_steps) > 0)
+            self.assertTrue(
+                f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
+            )
+
+        pipe.set_progress_bar_config(disable=True)
+        with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
+            _ = pipe(**inputs)
+            self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
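Usage sketch: with this refactor, a pipeline's fast-test class plugs into `PipelineTesterMixin` by setting `pipeline_class` and implementing the `get_dummy_components`/`get_dummy_inputs` hooks that the mixin otherwise raises `NotImplementedError` for. The minimal child class below is illustrative, not part of the patch: it reuses the tiny `UNet2DModel`/`DDPMScheduler` setup from the deleted module-level `test_progress_bar`, and the disabled feature flags and relative import path are assumptions for a bare unconditional pipeline.

import unittest

import torch

from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel

from .test_pipelines_common import PipelineTesterMixin  # adjust the relative path to the test file's location


class DDPMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDPMPipeline

    # hypothetical flags: an unconditional pixel-space pipeline exposes no attention
    # slicing, CPU offload, or xformers hooks for the mixin to exercise
    test_attention_slicing = False
    test_cpu_offload = False
    test_xformers_attention = False

    def get_dummy_components(self):
        # seed before building so that every call returns identically initialized weights
        # (test_float16_inference and the save/load tests rely on this determinism)
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDPMScheduler(num_train_timesteps=10)
        return {"unet": unet, "scheduler": scheduler}

    def get_dummy_inputs(self, device, seed=0):
        if device == "mps":
            # device type MPS is not supported for torch.Generator(); fall back to the global seed
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {"generator": generator, "num_inference_steps": 2, "output_type": "numpy"}

Note that the keys returned by `get_dummy_components` must match the pipeline's `__init__` argument names: the mixin instantiates `self.pipeline_class(**components)`, and `test_components_function` compares those keys against `pipe.components`.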