From 38420b9c44d30b48045ef9513c73b532e2d06539 Mon Sep 17 00:00:00 2001 From: anton- Date: Fri, 2 Dec 2022 18:05:05 +0100 Subject: [PATCH 01/24] [WIP] Standardize fast pipeline tests with PipelineTestMixin --- src/diffusers/pipelines/ddim/pipeline_ddim.py | 4 +- tests/pipelines/ddim/test_ddim.py | 41 +- .../stable_diffusion/test_stable_diffusion.py | 394 +++--------------- tests/test_pipelines_common.py | 84 +++- 4 files changed, 157 insertions(+), 366 deletions(-) diff --git a/src/diffusers/pipelines/ddim/pipeline_ddim.py b/src/diffusers/pipelines/ddim/pipeline_ddim.py index b9e590dea646..f5f2d404c2b4 100644 --- a/src/diffusers/pipelines/ddim/pipeline_ddim.py +++ b/src/diffusers/pipelines/ddim/pipeline_ddim.py @@ -96,10 +96,10 @@ def __call__( if self.device.type == "mps": # randn does not work reproducibly on mps - image = torch.randn(image_shape, generator=generator) + image = torch.randn(image_shape, generator=generator, dtype=self.unet.dtype) image = image.to(self.device) else: - image = torch.randn(image_shape, generator=generator, device=self.device) + image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(num_inference_steps) diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py index 2d03383599e0..1d03da48dbd1 100644 --- a/tests/pipelines/ddim/test_ddim.py +++ b/tests/pipelines/ddim/test_ddim.py @@ -28,10 +28,11 @@ class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - @property - def dummy_uncond_unet(self): + pipeline_class = DDIMPipeline + + def get_common_pipeline_components(self): torch.manual_seed(0) - model = UNet2DModel( + unet = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -40,32 +41,34 @@ def dummy_uncond_unet(self): down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) - return model - - def test_inference(self): - device = "cpu" - unet = self.dummy_uncond_unet scheduler = DDIMScheduler() + components = {"unet": unet, "scheduler": scheduler} + return components - ddpm = DDIMPipeline(unet=unet, scheduler=scheduler) - ddpm.to(device) - ddpm.set_progress_bar_config(disable=None) + def get_common_inputs(self, device, seed=0): + inputs = { + "generator": torch.Generator(device=device).manual_seed(seed), + "num_inference_steps": 2, + "output_type": "numpy", + } + return inputs - generator = torch.Generator(device=device).manual_seed(0) - image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images + def test_inference(self): + device = "cpu" - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0] + pipe = self.pipeline_class(**self.get_common_pipeline_components()) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + image = pipe(**self.get_common_inputs(device)).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - assert image.shape == (1, 32, 32, 3) + self.assertEqual(image.shape, (1, 32, 32, 3)) expected_slice = np.array( [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] ) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) @slow diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index 8dce61c3a456..7feb16d0b36b 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -46,39 +46,11 @@ class StableDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model + pipeline_class = StableDiffusionPipeline - @property - def dummy_cond_unet(self): + def get_common_pipeline_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -88,40 +60,15 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, ) - return model - - @property - def dummy_vae(self): torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -129,12 +76,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -145,128 +88,55 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_common_inputs(self, device, seed=0): + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": torch.Generator(device=device).manual_seed(seed), + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + output = sd_pipe(**self.get_common_inputs(device)) image = output.images - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [ - 0.5643956661224365, - 0.6017904281616211, - 0.4799129366874695, - 0.5267305374145508, - 0.5584856271743774, - 0.46413588523864746, - 0.5159522294998169, - 0.4963662028312683, - 0.47919973731040955, - ] - ) + expected_slice = np.array([0.5643, 0.6017, 0.4799, 0.5267, 0.5584, 0.4641, 0.5159, 0.4963, 0.4791]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - height=136, - width=136, - num_inference_steps=2, - output_type="np", - ) + output = sd_pipe(**self.get_common_inputs(device), height=136, width=136) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -278,60 +148,18 @@ def test_stable_diffusion_ddim_factor_8(self): def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + output = sd_pipe(**self.get_common_inputs(device)) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) - expected_slice = np.array( - [ - 0.5094760060310364, - 0.5674174427986145, - 0.46675148606300354, - 0.5125715136528015, - 0.5696930289268494, - 0.4674668312072754, - 0.5277683734893799, - 0.4964486062526703, - 0.494540274143219, - ] - ) + expected_slice = np.array([0.5094, 0.5674, 0.4667, 0.5125, 0.5696, 0.4674, 0.5277, 0.4964, 0.4945]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( @@ -356,43 +184,15 @@ def test_stable_diffusion_no_safety_checker(self): def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + output = sd_pipe(**self.get_common_inputs(device)) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -409,47 +209,18 @@ def test_stable_diffusion_k_lms(self): ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + output = sd_pipe(**self.get_common_inputs(device)) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -466,47 +237,18 @@ def test_stable_diffusion_k_euler_ancestral(self): ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + output = sd_pipe(**self.get_common_inputs(device)) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -523,7 +265,6 @@ def test_stable_diffusion_k_euler(self): ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_attention_chunk(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator @@ -699,39 +440,6 @@ def test_stable_diffusion_num_images_per_prompt(self): assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3) - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_fp16(self): - """Test that stable diffusion works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images - - assert image.shape == (1, 64, 64, 3) - def test_stable_diffusion_long_prompt(self): unet = self.dummy_cond_unet scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index bf99925f1c4f..30086242bf5c 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -1,4 +1,13 @@ -from diffusers.utils.testing_utils import require_torch +import tempfile +import unittest + +import numpy as np +import torch + +from diffusers.utils.testing_utils import require_torch, torch_device + + +torch.backends.cuda.matmul.allow_tf32 = False @require_torch @@ -9,4 +18,75 @@ class PipelineTesterMixin: equivalence of dict and tuple outputs, etc. """ - pass + def test_save_load(self): + device = "cpu" + pipe = self.pipeline_class(**self.get_common_pipeline_components()) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_common_inputs(device))[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(device) + pipe_loaded.set_progress_bar_config(disable=None) + + output_loaded = pipe_loaded(**self.get_common_inputs(device))[0] + + max_diff = np.abs(output - output_loaded).max() + self.assertLessEqual(max_diff, 1e-5) + + def test_tuple_output(self): + device = "cpu" + pipe = self.pipeline_class(**self.get_common_pipeline_components()) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_common_inputs(device))[0] + output_tuple = pipe(**self.get_common_inputs(device), return_dict=False)[0] + + max_diff = np.abs(output - output_tuple).max() + self.assertLessEqual(max_diff, 1e-5) + + @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") + def test_float16(self): + device = "cuda" + components = self.get_common_pipeline_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.half() + pipe_fp16 = self.pipeline_class(**components) + pipe_fp16.to(device) + pipe_fp16.set_progress_bar_config(disable=None) + + output = pipe(**self.get_common_inputs(device))[0] + output_fp16 = pipe_fp16(**self.get_common_inputs(device))[0] + + max_diff = np.abs(output - output_fp16).max() + # the outputs can be different, but not too much + self.assertLessEqual(max_diff, 1e-2) + + @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") + def test_to_device(self): + components = self.get_common_pipeline_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_common_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to("cuda") + model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] + self.assertTrue(all(device == "cuda" for device in model_devices)) + + output_cuda = pipe(**self.get_common_inputs("cuda"))[0] + self.assertTrue(np.isnan(output_cuda).sum() == 0) From 9349279ababe517596550a3d69470483ca1175cf Mon Sep 17 00:00:00 2001 From: anton- Date: Mon, 5 Dec 2022 09:33:01 +0100 Subject: [PATCH 02/24] refactor the sd tests a bit --- tests/pipelines/ddim/test_ddim.py | 6 +- .../stable_diffusion/test_stable_diffusion.py | 130 ++++++------------ tests/test_pipelines_common.py | 12 +- 3 files changed, 57 insertions(+), 91 deletions(-) diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py index 1d03da48dbd1..10bf08cfc261 100644 --- a/tests/pipelines/ddim/test_ddim.py +++ b/tests/pipelines/ddim/test_ddim.py @@ -56,11 +56,13 @@ def get_common_inputs(self, device, seed=0): def test_inference(self): device = "cpu" - pipe = self.pipeline_class(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) - image = pipe(**self.get_common_inputs(device)).images + inputs = self.get_common_inputs(device) + image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 32, 3)) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index 7feb16d0b36b..fcd153d22fe4 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -115,11 +115,13 @@ def get_common_inputs(self, device, seed=0): def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - output = sd_pipe(**self.get_common_inputs(device)) + inputs = self.get_common_inputs(device) + output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -132,11 +134,13 @@ def test_stable_diffusion_ddim(self): def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - output = sd_pipe(**self.get_common_inputs(device), height=136, width=136) + inputs = self.get_common_inputs(device) + output = sd_pipe(**inputs, height=136, width=136) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -148,12 +152,14 @@ def test_stable_diffusion_ddim_factor_8(self): def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - output = sd_pipe(**self.get_common_inputs(device)) + inputs = self.get_common_inputs(device) + output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -185,12 +191,14 @@ def test_stable_diffusion_no_safety_checker(self): def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - output = sd_pipe(**self.get_common_inputs(device)) + inputs = self.get_common_inputs(device) + output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -213,12 +221,14 @@ def test_stable_diffusion_k_lms(self): def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - output = sd_pipe(**self.get_common_inputs(device)) + inputs = self.get_common_inputs(device) + output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -241,12 +251,14 @@ def test_stable_diffusion_k_euler_ancestral(self): def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - sd_pipe = StableDiffusionPipeline(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - output = sd_pipe(**self.get_common_inputs(device)) + inputs = self.get_common_inputs(device) + output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -268,108 +280,56 @@ def test_stable_diffusion_k_euler(self): def test_stable_diffusion_attention_chunk(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_common_pipeline_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + inputs = self.get_common_inputs(device) + output_1 = sd_pipe(**inputs) # make sure chunking the attention yields the same result sd_pipe.enable_attention_slicing(slice_size=1) - generator = torch.Generator(device=device).manual_seed(0) - output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + inputs = self.get_common_inputs(device) + output_2 = sd_pipe(**inputs) assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4 def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_common_pipeline_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - image_count = 4 - generator = torch.Generator(device=device).manual_seed(0) - output_1 = sd_pipe( - [prompt] * image_count, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np" - ) + inputs = self.get_common_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + output_1 = sd_pipe(**inputs) # make sure sliced vae decode yields the same result sd_pipe.enable_vae_slicing() - generator = torch.Generator(device=device).manual_seed(0) - output_2 = sd_pipe( - [prompt] * image_count, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np" - ) + inputs = self.get_common_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + output_2 = sd_pipe(inputs) # there is a small discrepancy at image borders vs. full batch decode assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_common_pipeline_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" + inputs = self.get_common_inputs(device) negative_prompt = "french fries" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt, - negative_prompt=negative_prompt, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - ) + output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 30086242bf5c..efa1b8537b67 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -20,11 +20,13 @@ class PipelineTesterMixin: def test_save_load(self): device = "cpu" - pipe = self.pipeline_class(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) - output = pipe(**self.get_common_inputs(device))[0] + inputs = self.get_common_inputs(device) + output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) @@ -39,11 +41,13 @@ def test_save_load(self): def test_tuple_output(self): device = "cpu" - pipe = self.pipeline_class(**self.get_common_pipeline_components()) + components = self.get_common_pipeline_components() + pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) - output = pipe(**self.get_common_inputs(device))[0] + inputs = self.get_common_inputs(device) + output = pipe(**inputs)[0] output_tuple = pipe(**self.get_common_inputs(device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() From f5ae7ec8e9b88e58b1afd62f2c43a7ccd9760b95 Mon Sep 17 00:00:00 2001 From: anton- Date: Mon, 5 Dec 2022 15:33:05 +0100 Subject: [PATCH 03/24] add more common tests --- tests/pipelines/ddim/test_ddim.py | 8 +- .../stable_diffusion/test_stable_diffusion.py | 108 ++++------- tests/test_pipelines_common.py | 167 +++++++++++++++--- 3 files changed, 184 insertions(+), 99 deletions(-) diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py index 10bf08cfc261..316ea86e96bc 100644 --- a/tests/pipelines/ddim/test_ddim.py +++ b/tests/pipelines/ddim/test_ddim.py @@ -30,7 +30,7 @@ class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DDIMPipeline - def get_common_pipeline_components(self): + def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DModel( block_out_channels=(32, 64), @@ -45,7 +45,7 @@ def get_common_pipeline_components(self): components = {"unet": unet, "scheduler": scheduler} return components - def get_common_inputs(self, device, seed=0): + def get_dummy_inputs(self, device, seed=0): inputs = { "generator": torch.Generator(device=device).manual_seed(seed), "num_inference_steps": 2, @@ -56,12 +56,12 @@ def get_common_inputs(self, device, seed=0): def test_inference(self): device = "cpu" - components = self.get_common_pipeline_components() + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index fcd153d22fe4..bddaf7f4ce0e 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -14,7 +14,6 @@ # limitations under the License. import gc -import random import tempfile import time import unittest @@ -31,11 +30,9 @@ PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, - UNet2DModel, - VQModel, logging, ) -from diffusers.utils import floats_tensor, load_numpy, slow, torch_device +from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer @@ -48,7 +45,7 @@ class StableDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPipeline - def get_common_pipeline_components(self): + def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), @@ -102,7 +99,7 @@ def get_common_pipeline_components(self): } return components - def get_common_inputs(self, device, seed=0): + def get_dummy_inputs(self, device, seed=0): inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": torch.Generator(device=device).manual_seed(seed), @@ -115,12 +112,12 @@ def get_common_inputs(self, device, seed=0): def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images @@ -134,12 +131,12 @@ def test_stable_diffusion_ddim(self): def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, height=136, width=136) image = output.images @@ -152,13 +149,13 @@ def test_stable_diffusion_ddim_factor_8(self): def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -191,13 +188,13 @@ def test_stable_diffusion_no_safety_checker(self): def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -221,13 +218,13 @@ def test_stable_diffusion_k_lms(self): def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -251,13 +248,13 @@ def test_stable_diffusion_k_euler_ancestral(self): def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -280,25 +277,25 @@ def test_stable_diffusion_k_euler(self): def test_stable_diffusion_attention_chunk(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output_1 = sd_pipe(**inputs) # make sure chunking the attention yields the same result sd_pipe.enable_attention_slicing(slice_size=1) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output_2 = sd_pipe(**inputs) assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4 def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) @@ -306,28 +303,28 @@ def test_stable_diffusion_vae_slicing(self): image_count = 4 - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_1 = sd_pipe(**inputs) # make sure sliced vae decode yields the same result sd_pipe.enable_vae_slicing() - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count - output_2 = sd_pipe(inputs) + output_2 = sd_pipe(**inputs) # there is a small discrepancy at image borders vs. full batch decode assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_common_pipeline_components() + components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) @@ -352,22 +349,9 @@ def test_stable_diffusion_negative_prompt(self): def test_stable_diffusion_num_images_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) @@ -401,22 +385,9 @@ def test_stable_diffusion_num_images_per_prompt(self): assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3) def test_stable_diffusion_long_prompt(self): - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) @@ -452,22 +423,9 @@ def test_stable_diffusion_long_prompt(self): assert cap_logger_3.out == "" def test_stable_diffusion_height_width_opt(self): - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index efa1b8537b67..b254015b65b6 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -1,9 +1,14 @@ +import gc +import inspect import tempfile +import time import unittest +from typing import Callable, Union import numpy as np import torch +from diffusers import DiffusionPipeline from diffusers.utils.testing_utils import require_torch, torch_device @@ -18,14 +23,38 @@ class PipelineTesterMixin: equivalence of dict and tuple outputs, etc. """ - def test_save_load(self): + @property + def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: + raise NotImplementedError( + "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " + "See existing pipeline tests for reference." + ) + + def get_dummy_components(self): + raise NotImplementedError( + "You need to implement `get_dummy_components(self)` in the child test class. " + "See existing pipeline tests for reference." + ) + + def get_dummy_inputs(self, device, seed=0): + raise NotImplementedError( + "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " + "See existing pipeline tests for reference." + ) + + def tearDown(self): + # clean up the VRAM after each test in case of CUDA runtime errors + super().tearDown() + gc.collect() + torch.cuda.empty_cache() + + def test_save_load_local(self): device = "cpu" - components = self.get_common_pipeline_components() + components = self.get_dummy_components() pipe = self.pipeline_class(**components) - pipe.to(device) pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) + inputs = self.get_dummy_inputs(device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: @@ -34,29 +63,63 @@ def test_save_load(self): pipe_loaded.to(device) pipe_loaded.set_progress_bar_config(disable=None) - output_loaded = pipe_loaded(**self.get_common_inputs(device))[0] + output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] max_diff = np.abs(output - output_loaded).max() self.assertLessEqual(max_diff, 1e-5) - def test_tuple_output(self): + def test_dict_tuple_outputs_equivalent(self): device = "cpu" - components = self.get_common_pipeline_components() + components = self.get_dummy_components() pipe = self.pipeline_class(**components) - pipe.to(device) pipe.set_progress_bar_config(disable=None) - inputs = self.get_common_inputs(device) - output = pipe(**inputs)[0] - output_tuple = pipe(**self.get_common_inputs(device), return_dict=False)[0] + output = pipe(**self.get_dummy_inputs(device))[0] + output_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() self.assertLessEqual(max_diff, 1e-5) + def test_pipeline_call_implements_required_args(self): + required_args = ["num_inference_steps", "generator", "return_dict"] + + for arg in required_args: + self.assertTrue(arg in inspect.signature(self.pipeline_class.__call__).parameters) + + def test_num_inference_steps_consistent(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + outputs = [] + times = [] + for num_steps in [1, 3, 6]: + inputs = self.get_dummy_inputs("cpu") + inputs["num_inference_steps"] = num_steps + + start_time = time.time() + output = pipe(**inputs)[0] + inference_time = time.time() - start_time + + outputs.append(output) + times.append(inference_time) + + # check that all outputs have the same shape + self.assertTrue(all(outputs[0].shape == output.shape for output in outputs)) + # check that the inference time increases with the number of inference steps + self.assertTrue(all(times[i] > times[i - 1] for i in range(1, len(times)))) + + def test_components_function(self): + init_components = self.get_dummy_components() + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") - def test_float16(self): + def test_float16_inference(self): device = "cuda" - components = self.get_common_pipeline_components() + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) @@ -68,16 +131,80 @@ def test_float16(self): pipe_fp16.to(device) pipe_fp16.set_progress_bar_config(disable=None) - output = pipe(**self.get_common_inputs(device))[0] - output_fp16 = pipe_fp16(**self.get_common_inputs(device))[0] + output = pipe(**self.get_dummy_inputs(device))[0] + output_fp16 = pipe_fp16(**self.get_dummy_inputs(device))[0] max_diff = np.abs(output - output_fp16).max() - # the outputs can be different, but not too much - self.assertLessEqual(max_diff, 1e-2) + self.assertLessEqual(max_diff, 1e-2, "The outputs of the fp16 and fp32 pipelines are too different.") + + @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") + def test_save_load_float16(self): + device = "cuda" + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(device).half() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] + + max_diff = np.abs(output - output_loaded).max() + self.assertLessEqual(max_diff, 1e-5, "The output of the fp16 pipeline changed after saving and loading.") + + def test_save_load_optional_components(self): + if not hasattr(self.pipeline_class, "_optional_components"): + return + + device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] + + max_diff = np.abs(output - output_loaded).max() + self.assertLessEqual(max_diff, 1e-5) @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): - components = self.get_common_pipeline_components() + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) @@ -85,12 +212,12 @@ def test_to_device(self): model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) - output_cpu = pipe(**self.get_common_inputs("cpu"))[0] + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cuda" for device in model_devices)) - output_cuda = pipe(**self.get_common_inputs("cuda"))[0] + output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(output_cuda).sum() == 0) From d898a2407bdea9fa5cee1c249c95e6f360f6bcfc Mon Sep 17 00:00:00 2001 From: anton- Date: Mon, 5 Dec 2022 16:56:02 +0100 Subject: [PATCH 04/24] add xformers --- .../stable_diffusion/test_stable_diffusion.py | 18 --- tests/test_pipelines_common.py | 108 +++++++++++++++--- 2 files changed, 91 insertions(+), 35 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index bddaf7f4ce0e..2c544472efb2 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -275,24 +275,6 @@ def test_stable_diffusion_k_euler(self): ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - def test_stable_diffusion_attention_chunk(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) - sd_pipe = StableDiffusionPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - output_1 = sd_pipe(**inputs) - - # make sure chunking the attention yields the same result - sd_pipe.enable_attention_slicing(slice_size=1) - inputs = self.get_dummy_inputs(device) - output_2 = sd_pipe(**inputs) - - assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4 - def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index b254015b65b6..7f39083b4525 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -9,6 +9,7 @@ import torch from diffusers import DiffusionPipeline +from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available from diffusers.utils.testing_utils import require_torch, torch_device @@ -23,6 +24,11 @@ class PipelineTesterMixin: equivalence of dict and tuple outputs, etc. """ + # set these parameters to False in the child class if the pipeline does not support the corresponding functionality + test_attention_slicing = True + test_cpu_offload = True + test_xformers_attention = True + @property def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: raise NotImplementedError( @@ -49,36 +55,37 @@ def tearDown(self): torch.cuda.empty_cache() def test_save_load_local(self): - device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) + pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) - inputs = self.get_dummy_inputs(device) + inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) - pipe_loaded.to(device) + pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) - output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() - self.assertLessEqual(max_diff, 1e-5) + self.assertLess(max_diff, 1e-5) def test_dict_tuple_outputs_equivalent(self): - device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) + pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) - output = pipe(**self.get_dummy_inputs(device))[0] - output_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] + output = pipe(**self.get_dummy_inputs(torch_device))[0] + output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() - self.assertLessEqual(max_diff, 1e-5) + self.assertLess(max_diff, 1e-5) def test_pipeline_call_implements_required_args(self): required_args = ["num_inference_steps", "generator", "return_dict"] @@ -89,12 +96,13 @@ def test_pipeline_call_implements_required_args(self): def test_num_inference_steps_consistent(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) + pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) outputs = [] times = [] for num_steps in [1, 3, 6]: - inputs = self.get_dummy_inputs("cpu") + inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps start_time = time.time() @@ -135,7 +143,7 @@ def test_float16_inference(self): output_fp16 = pipe_fp16(**self.get_dummy_inputs(device))[0] max_diff = np.abs(output - output_fp16).max() - self.assertLessEqual(max_diff, 1e-2, "The outputs of the fp16 and fp32 pipelines are too different.") + self.assertLess(max_diff, 1e-2, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): @@ -167,28 +175,28 @@ def test_save_load_float16(self): output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] max_diff = np.abs(output - output_loaded).max() - self.assertLessEqual(max_diff, 1e-5, "The output of the fp16 pipeline changed after saving and loading.") + self.assertLess(max_diff, 1e-5, "The output of the fp16 pipeline changed after saving and loading.") def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return - device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) + pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) - inputs = self.get_dummy_inputs(device) + inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) - pipe_loaded.to(device) + pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: @@ -197,10 +205,11 @@ def test_save_load_optional_components(self): f"`{optional_component}` did not stay set to None after loading.", ) - output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() - self.assertLessEqual(max_diff, 1e-5) + self.assertLess(max_diff, 1e-5) @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): @@ -221,3 +230,68 @@ def test_to_device(self): output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(output_cuda).sum() == 0) + + def test_attention_slicing_forward_pass(self): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_slicing = pipe(**inputs) + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(torch_device) + output_with_slicing = pipe(**inputs) + + max_diff = np.abs(output_with_slicing.images - output_without_slicing.images).max() + self.assertLess(max_diff, 1e-5, "Attention slicing should not affect the inference results") + + @unittest.skipIf( + torch_device != "cuda" or not is_accelerate_available(), + reason="XFormers attention is only available with CUDA and `accelerate` installed", + ) + def test_cpu_offload_forward_pass(self): + if not self.test_cpu_offload: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs) + + pipe.enable_sequential_cpu_offload() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs) + + max_diff = np.abs(output_with_offload.images - output_without_offload.images).max() + self.assertLess(max_diff, 1e-5, "CPU offloading should not affect the inference results") + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forward_pass(self): + if not self.test_xformers_attention: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs) + + max_diff = np.abs(output_with_offload.images - output_without_offload.images).max() + self.assertLess(max_diff, 1e-5, "XFormers attention should not affect the inference results") From ce1b7768084caed0a5ea1818c73014e2561d8782 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 12:44:56 +0100 Subject: [PATCH 05/24] add progressbar test --- tests/test_pipelines.py | 25 +------------------------ tests/test_pipelines_common.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py index 630cec65ffaf..5d7c0ab48743 100644 --- a/tests/test_pipelines.py +++ b/tests/test_pipelines.py @@ -55,29 +55,6 @@ torch.backends.cuda.matmul.allow_tf32 = False -def test_progress_bar(capsys): - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - scheduler = DDPMScheduler(num_train_timesteps=10) - - ddpm = DDPMPipeline(model, scheduler).to(torch_device) - ddpm(output_type="numpy").images - captured = capsys.readouterr() - assert "10/10" in captured.err, "Progress bar has to be displayed" - - ddpm.set_progress_bar_config(disable=True) - ddpm(output_type="numpy").images - captured = capsys.readouterr() - assert captured.err == "", "Progress bar should be disabled" - - class DownloadTests(unittest.TestCase): def test_download_only_pytorch(self): with tempfile.TemporaryDirectory() as tmpdirname: @@ -106,7 +83,7 @@ def test_returned_cached_folder(self): pipe_2 = StableDiffusionPipeline.from_pretrained(local_path) pipe = pipe.to(torch_device) - pipe_2 = pipe.to(torch_device) + pipe_2 = pipe_2.to(torch_device) if torch_device == "mps": # device type MPS is not supported for torch.Generator() api. generator = torch.manual_seed(0) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 7f39083b4525..77206b84eed2 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -1,5 +1,7 @@ +import contextlib import gc import inspect +import io import tempfile import time import unittest @@ -295,3 +297,22 @@ def test_xformers_attention_forward_pass(self): max_diff = np.abs(output_with_offload.images - output_without_offload.images).max() self.assertLess(max_diff, 1e-5, "XFormers attention should not affect the inference results") + + def test_progress_bar(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + num_steps = inputs["num_inference_steps"] + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue( + f"{num_steps}/{num_steps}" in stderr.getvalue(), + "Progress bar should be enabled, displaying the requested `num_inference_steps`", + ) + + pipe.set_progress_bar_config(disable=True) + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") From 26d7a3bc22bea96ce440fcab8b8fc57f8cd0d413 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 12:58:25 +0100 Subject: [PATCH 06/24] cleanup --- tests/test_pipelines_common.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 77206b84eed2..be12a8e90bbd 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -128,43 +128,41 @@ def test_components_function(self): @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self): - device = "cuda" components = self.get_dummy_components() pipe = self.pipeline_class(**components) - pipe.to(device) + pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe_fp16 = self.pipeline_class(**components) - pipe_fp16.to(device) + pipe_fp16.to(torch_device) pipe_fp16.set_progress_bar_config(disable=None) - output = pipe(**self.get_dummy_inputs(device))[0] - output_fp16 = pipe_fp16(**self.get_dummy_inputs(device))[0] + output = pipe(**self.get_dummy_inputs(torch_device))[0] + output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] max_diff = np.abs(output - output_fp16).max() self.assertLess(max_diff, 1e-2, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): - device = "cuda" components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): - components[name] = module.to(device).half() + components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) - pipe.to(device) + pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) - inputs = self.get_dummy_inputs(device) + inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) - pipe_loaded.to(device) + pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): @@ -174,10 +172,11 @@ def test_save_load_float16(self): f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) - output_loaded = pipe_loaded(**self.get_dummy_inputs(device))[0] + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() - self.assertLess(max_diff, 1e-5, "The output of the fp16 pipeline changed after saving and loading.") + self.assertLess(max_diff, 5e-3, "The output of the fp16 pipeline changed after saving and loading.") def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): From 836fbe2c6bce8f53c1e484ff6cde716f16f156a5 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 13:01:58 +0100 Subject: [PATCH 07/24] upd fp16 --- tests/test_pipelines_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index be12a8e90bbd..0d3b878367ea 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -176,7 +176,7 @@ def test_save_load_float16(self): output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() - self.assertLess(max_diff, 5e-3, "The output of the fp16 pipeline changed after saving and loading.") + self.assertLess(max_diff, 3e-3, "The output of the fp16 pipeline changed after saving and loading.") def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): From 148546f2d0b77de876e1326cbdffa8a82e9ee11f Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 13:40:12 +0100 Subject: [PATCH 08/24] CycleDiffusionPipelineFastTests --- .../stable_diffusion/test_cycle_diffusion.py | 228 +++++------------- tests/test_pipelines_common.py | 9 +- 2 files changed, 62 insertions(+), 175 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py index 33157ed9ad30..525df1d72328 100644 --- a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py @@ -20,7 +20,7 @@ import numpy as np import torch -from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel, UNet2DModel, VQModel +from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer @@ -32,39 +32,11 @@ class CycleDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) + pipeline_class = CycleDiffusionPipeline - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_uncond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - @property - def dummy_cond_unet(self): - torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -74,40 +46,16 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + num_train_timesteps=1000, + clip_sample=False, + set_alpha_to_one=False, ) - return model - - @property - def dummy_vae(self): torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -115,12 +63,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -131,68 +75,47 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "An astronaut riding an elephant", + "source_prompt": "An astronaut riding a horse", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "eta": 0.1, + "strength": 0.8, + "guidance_scale": 3, + "source_guidance_scale": 1, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_cycle(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = CycleDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - source_prompt = "An astronaut riding a horse" - prompt = "An astronaut riding an elephant" - init_image = self.dummy_image.to(device) + components = self.get_dummy_components() + pipe = CycleDiffusionPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt=prompt, - source_prompt=source_prompt, - generator=generator, - num_inference_steps=2, - image=init_image, - eta=0.1, - strength=0.8, - guidance_scale=3, - source_guidance_scale=1, - output_type="np", - ) + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] @@ -204,53 +127,16 @@ def test_stable_diffusion_cycle(self): @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_cycle_fp16(self): - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = CycleDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - source_prompt = "An astronaut riding a horse" - prompt = "An astronaut riding an elephant" - init_image = self.dummy_image.to(torch_device) + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.half() + pipe = CycleDiffusionPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=torch_device).manual_seed(0) - output = sd_pipe( - prompt=prompt, - source_prompt=source_prompt, - generator=generator, - num_inference_steps=2, - image=init_image, - eta=0.1, - strength=0.8, - guidance_scale=3, - source_guidance_scale=1, - output_type="np", - ) + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 0d3b878367ea..575d49237f8f 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -2,6 +2,7 @@ import gc import inspect import io +import re import tempfile import time import unittest @@ -103,7 +104,7 @@ def test_num_inference_steps_consistent(self): outputs = [] times = [] - for num_steps in [1, 3, 6]: + for num_steps in [3, 6, 9]: inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps @@ -303,12 +304,12 @@ def test_progress_bar(self): pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) - num_steps = inputs["num_inference_steps"] with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) + stderr = stderr.getvalue() + max_steps = re.search("/(.*?) ", stderr).group(1) self.assertTrue( - f"{num_steps}/{num_steps}" in stderr.getvalue(), - "Progress bar should be enabled, displaying the requested `num_inference_steps`", + f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) From 3e6a354cf5313ffbb8b394b71529d31e00a4ef1b Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 13:50:24 +0100 Subject: [PATCH 09/24] DanceDiffusionPipelineFastTests --- .../dance_diffusion/test_dance_diffusion.py | 54 ++++++++++--------- tests/test_pipelines_common.py | 18 +++---- 2 files changed, 39 insertions(+), 33 deletions(-) diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index a63ef84c63f5..c7e1b1c4d1d1 100644 --- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py @@ -23,21 +23,20 @@ from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu +from ...test_pipelines_common import PipelineTesterMixin + torch.backends.cuda.matmul.allow_tf32 = False -class PipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() +class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = DanceDiffusionPipeline + test_attention_slicing = False + test_cpu_offload = False - @property - def dummy_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet1DModel( + unet = UNet1DModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, @@ -48,34 +47,41 @@ def dummy_unet(self): use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", - down_block_types=["DownBlock1DNoSkip"] + ["DownBlock1D"] + ["AttnDownBlock1D"], - up_block_types=["AttnUpBlock1D"] + ["UpBlock1D"] + ["UpBlock1DNoSkip"], + down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), + up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), ) - return model + scheduler = IPNDMScheduler() + + components = { + "unet": unet, + "scheduler": scheduler, + } + return components + + def get_dummy_inputs(self, device, seed=0): + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "generator": generator, + "num_inference_steps": 4, + } + return inputs def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - scheduler = IPNDMScheduler() - - pipe = DanceDiffusionPipeline(unet=self.dummy_unet, scheduler=scheduler) + components = self.get_dummy_components() + pipe = DanceDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = pipe(generator=generator, num_inference_steps=4) + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs) audio = output.audios - generator = torch.Generator(device=device).manual_seed(0) - output = pipe(generator=generator, num_inference_steps=4, return_dict=False) - audio_from_tuple = output[0] - audio_slice = audio[0, -3:, -3:] - audio_from_tuple_slice = audio_from_tuple[0, -3:, -3:] - assert audio.shape == (1, 2, self.dummy_unet.sample_size) + assert audio.shape == (1, 2, components["unet"].sample_size) expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(audio_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 575d49237f8f..b8e2276e6274 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -243,13 +243,13 @@ def test_attention_slicing_forward_pass(self): pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) - output_without_slicing = pipe(**inputs) + output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(torch_device) - output_with_slicing = pipe(**inputs) + output_with_slicing = pipe(**inputs)[0] - max_diff = np.abs(output_with_slicing.images - output_without_slicing.images).max() + max_diff = np.abs(output_with_slicing - output_without_slicing).max() self.assertLess(max_diff, 1e-5, "Attention slicing should not affect the inference results") @unittest.skipIf( @@ -266,13 +266,13 @@ def test_cpu_offload_forward_pass(self): pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) - output_without_offload = pipe(**inputs) + output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(torch_device) - output_with_offload = pipe(**inputs) + output_with_offload = pipe(**inputs)[0] - max_diff = np.abs(output_with_offload.images - output_without_offload.images).max() + max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-5, "CPU offloading should not affect the inference results") @unittest.skipIf( @@ -289,13 +289,13 @@ def test_xformers_attention_forward_pass(self): pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) - output_without_offload = pipe(**inputs) + output_without_offload = pipe(**inputs)[0] pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) - output_with_offload = pipe(**inputs) + output_with_offload = pipe(**inputs)[0] - max_diff = np.abs(output_with_offload.images - output_without_offload.images).max() + max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-5, "XFormers attention should not affect the inference results") def test_progress_bar(self): From 488a6b5039ac5e6cb7653ba23ab33fd093fc25a2 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 14:51:26 +0100 Subject: [PATCH 10/24] AltDiffusionPipelineFastTests --- .../altdiffusion/test_alt_diffusion.py | 251 ++++++------------ tests/test_pipelines_common.py | 3 + 2 files changed, 91 insertions(+), 163 deletions(-) diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py index 91fe76444920..508fc16a9c59 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion.py @@ -14,7 +14,6 @@ # limitations under the License. import gc -import random import unittest import numpy as np @@ -25,9 +24,9 @@ RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) -from diffusers.utils import floats_tensor, slow, torch_device +from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import XLMRobertaTokenizer +from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from ...test_pipelines_common import PipelineTesterMixin @@ -36,25 +35,11 @@ class AltDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() + pipeline_class = AltDiffusionPipeline - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -64,27 +49,15 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, ) - return model - - @property - def dummy_vae(self): torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -92,84 +65,86 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - @property - def dummy_text_encoder(self): + # TODO: address the non-deterministic text encoder (fails for save-load tests) + # torch.manual_seed(0) + # text_encoder_config = RobertaSeriesConfig( + # hidden_size=32, + # project_dim=32, + # intermediate_size=37, + # layer_norm_eps=1e-05, + # num_attention_heads=4, + # num_hidden_layers=5, + # vocab_size=5002, + # ) + # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) + torch.manual_seed(0) - config = RobertaSeriesConfig( + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, hidden_size=32, - project_dim=32, + projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, + pad_token_id=1, vocab_size=5002, ) - return RobertaSeriesModelWithTransformation(config) + text_encoder = CLIPTextModel(text_encoder_config) - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") + tokenizer.model_max_length = 77 - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": torch.Generator(device=device).manual_seed(seed), + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_alt_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, + components = self.get_dummy_components() + torch.manual_seed(0) + text_encoder_config = RobertaSeriesConfig( + hidden_size=32, + project_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=5002, ) + # TODO: remove after fixing the non-deterministic text encoder + text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) + components["text_encoder"] = text_encoder + + alt_pipe = AltDiffusionPipeline(**components) alt_pipe = alt_pipe.to(device) alt_pipe.set_progress_bar_config(disable=None) - prompt = "A photo of an astronaut" - - generator = torch.Generator(device=device).manual_seed(0) - output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = "A photo of an astronaut" + output = alt_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( @@ -177,89 +152,39 @@ def test_alt_diffusion_ddim(self): ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_alt_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + text_encoder_config = RobertaSeriesConfig( + hidden_size=32, + project_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=5002, ) + # TODO: remove after fixing the non-deterministic text encoder + text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) + components["text_encoder"] = text_encoder + alt_pipe = AltDiffusionPipeline(**components) alt_pipe = alt_pipe.to(device) alt_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - + inputs = self.get_dummy_inputs(device) + output = alt_pipe(**inputs) image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = alt_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_alt_diffusion_fp16(self): - """Test that stable diffusion works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta") - tokenizer.model_max_length = 77 - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - alt_pipe = AltDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - alt_pipe = alt_pipe.to(torch_device) - alt_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images - - assert image.shape == (1, 64, 64, 3) @slow diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index b8e2276e6274..968817b05acf 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -307,7 +307,10 @@ def test_progress_bar(self): with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() + # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, + # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) ", stderr).group(1) + self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) From cb61d0a94282251d22b8a6fe9841d0a7e9664ad6 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 15:00:38 +0100 Subject: [PATCH 11/24] StableDiffusion2PipelineFastTests --- .../test_stable_diffusion.py | 390 +++--------------- 1 file changed, 60 insertions(+), 330 deletions(-) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py index efa4bdc6f3d1..be3b5b1ea214 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py @@ -43,16 +43,11 @@ class StableDiffusion2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() + pipeline_class = StableDiffusionPipeline - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -65,12 +60,15 @@ def dummy_cond_unet(self): attention_head_dim=(2, 4, 8, 8), use_linear_projection=True, ) - return model - - @property - def dummy_vae(self): + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -79,12 +77,8 @@ def dummy_vae(self): latent_channels=4, sample_size=128, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -98,378 +92,114 @@ def dummy_text_encoder(self): hidden_act="gelu", projection_dim=512, ) - return CLIPTextModel(config) - - def test_save_pretrained_from_pretrained(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - - vae = self.dummy_vae - bert = self.dummy_text_encoder + text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - image = output.images - - with tempfile.TemporaryDirectory() as tmpdirname: - sd_pipe.save_pretrained(tmpdirname) - sd_pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) - sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - new_image = output.images - - assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": torch.Generator(device=device).manual_seed(seed), + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5649, 0.6022, 0.4804, 0.5270, 0.5585, 0.4643, 0.5159, 0.4963, 0.4793]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5099, 0.5677, 0.4671, 0.5128, 0.5697, 0.4676, 0.5277, 0.4964, 0.4946]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4717, 0.5376, 0.4568, 0.5225, 0.5734, 0.4797, 0.5467, 0.5074, 0.5043]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerAncestralDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4715, 0.5376, 0.4569, 0.5224, 0.5734, 0.4797, 0.5465, 0.5074, 0.5046]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4717, 0.5376, 0.4568, 0.5225, 0.5734, 0.4797, 0.5467, 0.5074, 0.5043]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_attention_chunk(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - # make sure chunking the attention yields the same result - sd_pipe.enable_attention_slicing(slice_size=1) - generator = torch.Generator(device=device).manual_seed(0) - output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") - - assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_fp16(self): - """Test that stable diffusion works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images - - assert image.shape == (1, 64, 64, 3) def test_stable_diffusion_long_prompt(self): - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) From 75060ac95a88f43d84b03290c5a597da7d072d6f Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 15:10:15 +0100 Subject: [PATCH 12/24] StableDiffusion2InpaintPipelineFastTests --- .../test_stable_diffusion.py | 1 - .../test_stable_diffusion_inpaint.py | 179 +++++------------- 2 files changed, 43 insertions(+), 137 deletions(-) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py index be3b5b1ea214..3c9565dd5bc2 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py @@ -14,7 +14,6 @@ # limitations under the License. import gc -import tempfile import time import unittest diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py index b420570f0707..b2b508fa80ca 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py @@ -22,7 +22,7 @@ from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device -from diffusers.utils.testing_utils import require_torch_gpu +from diffusers.utils.testing_utils import require_torch_gpu, slow from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer @@ -32,26 +32,12 @@ torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusionInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image +class StableDiffusion2InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableDiffusionInpaintPipeline - @property - def dummy_cond_unet_inpaint(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -64,25 +50,19 @@ def dummy_cond_unet_inpaint(self): attention_head_dim=(2, 4, 8, 8), use_linear_projection=True, ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, + sample_size=128, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -96,129 +76,56 @@ def dummy_text_encoder(self): hidden_act="gelu", projection_dim=512, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract - - def test_stable_diffusion_inpaint(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder + text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_inpaint_fp16(self): - """Test that stable diffusion inpaint works with fp16""" - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - text_encoder = text_encoder.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ).images - - assert image.shape == (1, 64, 64, 3) -# @slow +@slow @require_torch_gpu class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): From 4fb91fd7711d4551581307366b7b524d91600464 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 15:24:21 +0100 Subject: [PATCH 13/24] StableDiffusionImageVariationPipelineFastTests --- .../test_stable_diffusion_image_variation.py | 231 ++++-------------- 1 file changed, 50 insertions(+), 181 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py index 90bfef5efeea..eb6824432fcd 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py @@ -38,25 +38,11 @@ class StableDiffusionImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) + pipeline_class = StableDiffusionImageVariationPipeline - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -66,12 +52,9 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -79,12 +62,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_image_encoder(self): torch.manual_seed(0) - config = CLIPVisionConfig( + image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, @@ -94,102 +73,55 @@ def dummy_image_encoder(self): image_size=32, patch_size=4, ) - return CLIPVisionModelWithProjection(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "image_encoder": image_encoder, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_img_variation_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - init_image, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - init_image, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5093, 0.5717, 0.4806, 0.4891, 0.5552, 0.4594, 0.5177, 0.4894, 0.4904]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img_variation_multiple_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(device).repeat(2, 1, 1, 1) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - init_image, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - ) + inputs = self.get_dummy_inputs(device) + inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) + output = sd_pipe(**inputs) image = output.images @@ -201,103 +133,40 @@ def test_stable_diffusion_img_variation_multiple_images(self): def test_stable_diffusion_img_variation_num_images_per_prompt(self): device = "cpu" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test num_images_per_prompt=1 (default) - images = sd_pipe( - init_image, - num_inference_steps=2, - output_type="np", - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs).images assert images.shape == (1, 64, 64, 3) # test num_images_per_prompt=1 (default) for batch of images batch_size = 2 - images = sd_pipe( - init_image.repeat(batch_size, 1, 1, 1), - num_inference_steps=2, - output_type="np", - ).images + inputs = self.get_dummy_inputs(device) + inputs["image"] = inputs["image"].repeat(batch_size, 1, 1, 1) + images = sd_pipe(**inputs).images assert images.shape == (batch_size, 64, 64, 3) # test num_images_per_prompt for single prompt num_images_per_prompt = 2 - images = sd_pipe( - init_image, - num_inference_steps=2, - output_type="np", - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (num_images_per_prompt, 64, 64, 3) # test num_images_per_prompt for batch of prompts batch_size = 2 - images = sd_pipe( - init_image.repeat(batch_size, 1, 1, 1), - num_inference_steps=2, - output_type="np", - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + inputs["image"] = inputs["image"].repeat(batch_size, 1, 1, 1) + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3) - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img_variation_fp16(self): - """Test that stable diffusion img2img works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - image_encoder = self.dummy_image_encoder - - init_image = self.dummy_image.to(torch_device).float() - - # put models in fp16 - unet = unet.half() - vae = vae.half() - image_encoder = image_encoder.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImageVariationPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - image_encoder=image_encoder, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - init_image, - generator=generator, - num_inference_steps=2, - output_type="np", - ).images - - assert image.shape == (1, 64, 64, 3) - @slow @require_torch_gpu From 32d6b4b85825c810f3935c3d9079bb8c02b4d6fe Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 15:39:34 +0100 Subject: [PATCH 14/24] StableDiffusionImg2ImgPipelineFastTests --- .../test_stable_diffusion_img2img.py | 386 +++--------------- 1 file changed, 64 insertions(+), 322 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py index 4d8247195504..3dfa58f3fdb7 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py @@ -27,8 +27,6 @@ PNDMScheduler, StableDiffusionImg2ImgPipeline, UNet2DConditionModel, - UNet2DModel, - VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu @@ -41,39 +39,11 @@ class StableDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model + pipeline_class = StableDiffusionImg2ImgPipeline - @property - def dummy_cond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -83,40 +53,9 @@ def dummy_cond_unet(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=9, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, - ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -124,12 +63,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -140,113 +75,58 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - return extract + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs def test_stable_diffusion_img2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4492, 0.3865, 0.4222, 0.5854, 0.5139, 0.4379, 0.4193, 0.48, 0.4218]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" + inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt, - negative_prompt=negative_prompt, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) + output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] @@ -256,40 +136,15 @@ def test_stable_diffusion_img2img_negative_prompt(self): def test_stable_diffusion_img2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device).repeat(2, 1, 1, 1) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = 2 * ["A painting of a squirrel eating a burger"] - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - prompt, - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - - image = output.images - + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * 2 + inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) + image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) @@ -298,171 +153,58 @@ def test_stable_diffusion_img2img_multiple_init_images(self): def test_stable_diffusion_img2img_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet - scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear") - - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - ) - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - return_dict=False, - ) - image_from_tuple = output[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4367, 0.4986, 0.4372, 0.6706, 0.5665, 0.444, 0.5864, 0.6019, 0.5203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_num_images_per_prompt(self): - device = "cpu" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(device) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - # test num_images_per_prompt=1 (default) - images = sd_pipe( - prompt, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs).images assert images.shape == (1, 32, 32, 3) # test num_images_per_prompt=1 (default) for batch of prompts batch_size = 2 - images = sd_pipe( - [prompt] * batch_size, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * batch_size + images = sd_pipe(**inputs).images assert images.shape == (batch_size, 32, 32, 3) # test num_images_per_prompt for single prompt num_images_per_prompt = 2 - images = sd_pipe( - prompt, - num_inference_steps=2, - output_type="np", - image=init_image, - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (num_images_per_prompt, 32, 32, 3) # test num_images_per_prompt for batch of prompts batch_size = 2 - images = sd_pipe( - [prompt] * batch_size, - num_inference_steps=2, - output_type="np", - image=init_image, - num_images_per_prompt=num_images_per_prompt, - ).images + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * batch_size + images = sd_pipe(**inputs, num_images_per_prompt=num_images_per_prompt).images assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3) - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_img2img_fp16(self): - """Test that stable diffusion img2img works with fp16""" - unet = self.dummy_cond_unet - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - init_image = self.dummy_image.to(torch_device) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionImg2ImgPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=self.dummy_extractor, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - ).images - - assert image.shape == (1, 32, 32, 3) - @slow @require_torch_gpu From 4e8b51aab0a4c235033087fe7b85dd8d5f84847b Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 16:19:04 +0100 Subject: [PATCH 15/24] StableDiffusionInpaintPipelineFastTests --- .../test_stable_diffusion_inpaint.py | 315 +++--------------- 1 file changed, 55 insertions(+), 260 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py index 6e6780d47ed6..935bfa6a2fdc 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py @@ -26,8 +26,6 @@ PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, - UNet2DModel, - VQModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device @@ -42,54 +40,11 @@ class StableDiffusionInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image + pipeline_class = StableDiffusionInpaintPipeline - @property - def dummy_uncond_unet(self): + def get_dummy_components(self): torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - @property - def dummy_cond_unet(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - return model - - @property - def dummy_cond_unet_inpaint(self): - torch.manual_seed(0) - model = UNet2DConditionModel( + unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, @@ -99,25 +54,9 @@ def dummy_cond_unet_inpaint(self): up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) - return model - - @property - def dummy_vq_model(self): - torch.manual_seed(0) - model = VQModel( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=3, - ) - return model - - @property - def dummy_vae(self): + scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) - model = AutoencoderKL( + vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, @@ -125,12 +64,8 @@ def dummy_vae(self): up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) - return model - - @property - def dummy_text_encoder(self): torch.manual_seed(0) - config = CLIPTextConfig( + text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, @@ -141,226 +76,86 @@ def dummy_text_encoder(self): pad_token_id=1, vocab_size=1000, ) - return CLIPTextModel(config) - - @property - def dummy_extractor(self): - def extract(*args, **kwargs): - class Out: - def __init__(self): - self.pixel_values = torch.ones([0]) - - def to(self, device): - self.pixel_values.to(device) - return self - - return Out() - - return extract - - def test_stable_diffusion_inpaint(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder + text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "numpy", + } + return inputs - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - return_dict=False, - )[0] - + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4723, 0.5731, 0.3939, 0.5441, 0.5922, 0.4392, 0.5059, 0.4651, 0.4474]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_image_tensor(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.repeat(1, 1, 2, 2) - mask_image = image / 2 - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=image, - mask_image=mask_image[:, 0], - ) - out_1 = output.images + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + out_pil = output.images - image = image.cpu().permute(0, 2, 3, 1)[0] - mask_image = mask_image.cpu().permute(0, 2, 3, 1)[0] + inputs = self.get_dummy_inputs(device) + inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) + inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) + output = sd_pipe(**inputs) + out_tensor = output.images - image = Image.fromarray(np.uint8(image)).convert("RGB") - mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB") - - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=image, - mask_image=mask_image, - ) - out_2 = output.images - - assert out_1.shape == (1, 64, 64, 3) - assert np.abs(out_1.flatten() - out_2.flatten()).max() < 5e-2 + assert out_pil.shape == (1, 64, 64, 3) + assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 def test_stable_diffusion_inpaint_with_num_images_per_prompt(self): device = "cpu" - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - images = sd_pipe( - [prompt], - generator=generator, - guidance_scale=6.0, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - num_images_per_prompt=2, - ).images + inputs = self.get_dummy_inputs(device) + images = sd_pipe(**inputs, num_images_per_prompt=2).images # check if the output is a list of 2 images assert len(images) == 2 - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_inpaint_fp16(self): - """Test that stable diffusion inpaint_legacy works with fp16""" - unet = self.dummy_cond_unet_inpaint - scheduler = PNDMScheduler(skip_prk_steps=True) - vae = self.dummy_vae - bert = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - - # put models in fp16 - unet = unet.half() - vae = vae.half() - bert = bert.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionInpaintPipeline( - unet=unet, - scheduler=scheduler, - vae=vae, - text_encoder=bert, - tokenizer=tokenizer, - safety_checker=None, - feature_extractor=None, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=torch_device).manual_seed(0) - image = sd_pipe( - [prompt], - generator=generator, - num_inference_steps=2, - output_type="np", - image=init_image, - mask_image=mask_image, - ).images - - assert image.shape == (1, 64, 64, 3) - @slow @require_torch_gpu From 58b31b85e95564ea94da4626e3275159e1f54a0f Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 16:23:47 +0100 Subject: [PATCH 16/24] remove unused mixins --- tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py | 2 +- tests/pipelines/ddpm/test_ddpm.py | 2 +- tests/pipelines/karras_ve/test_karras_ve.py | 2 +- tests/pipelines/latent_diffusion/test_latent_diffusion.py | 2 +- .../latent_diffusion/test_latent_diffusion_superresolution.py | 2 +- .../pipelines/latent_diffusion/test_latent_diffusion_uncond.py | 2 +- tests/pipelines/pndm/test_pndm.py | 2 +- tests/pipelines/score_sde_ve/test_score_sde_ve.py | 2 +- .../stable_diffusion/test_stable_diffusion_inpaint_legacy.py | 2 +- .../stable_diffusion_2/test_stable_diffusion_upscale.py | 2 +- .../stable_diffusion_2/test_stable_diffusion_v_pred.py | 2 +- tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py | 2 +- .../versatile_diffusion/test_versatile_diffusion_dual_guided.py | 2 +- .../test_versatile_diffusion_image_variation.py | 2 +- .../versatile_diffusion/test_versatile_diffusion_mega.py | 2 +- .../test_versatile_diffusion_text_to_image.py | 2 +- tests/pipelines/vq_diffusion/test_vq_diffusion.py | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py index 434e55f946b6..9da51e328c75 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py @@ -35,7 +35,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class AltDiffusionImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/ddpm/test_ddpm.py b/tests/pipelines/ddpm/test_ddpm.py index 0f7ca86273e0..38f15baa3c1c 100644 --- a/tests/pipelines/ddpm/test_ddpm.py +++ b/tests/pipelines/ddpm/test_ddpm.py @@ -28,7 +28,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class DDPMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class DDPMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/karras_ve/test_karras_ve.py b/tests/pipelines/karras_ve/test_karras_ve.py index 1fafa1cb40fa..962249d34f7f 100644 --- a/tests/pipelines/karras_ve/test_karras_ve.py +++ b/tests/pipelines/karras_ve/test_karras_ve.py @@ -27,7 +27,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class KarrasVePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class KarrasVePipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py index 9d5c07809dc0..1b0c73b7cc23 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py @@ -28,7 +28,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class LDMTextToImagePipelineFastTests(unittest.TestCase): @property def dummy_cond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py index d7992c2a43ab..6d2615b533b9 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -29,7 +29,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class LDMSuperResolutionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class LDMSuperResolutionPipelineFastTests(unittest.TestCase): @property def dummy_image(self): batch_size = 1 diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py index f063d6759e9b..649f68503995 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py @@ -28,7 +28,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class LDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class LDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/pndm/test_pndm.py b/tests/pipelines/pndm/test_pndm.py index 5d9212223e6e..355f4b390066 100644 --- a/tests/pipelines/pndm/test_pndm.py +++ b/tests/pipelines/pndm/test_pndm.py @@ -27,7 +27,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class PNDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class PNDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/score_sde_ve/test_score_sde_ve.py b/tests/pipelines/score_sde_ve/test_score_sde_ve.py index 9cdf3f0191e1..7deb4f882015 100644 --- a/tests/pipelines/score_sde_ve/test_score_sde_ve.py +++ b/tests/pipelines/score_sde_ve/test_score_sde_ve.py @@ -27,7 +27,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class ScoreSdeVeipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class ScoreSdeVeipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py index 95ad79a25748..3de8dec0e11b 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py @@ -41,7 +41,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusionInpaintLegacyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class StableDiffusionInpaintLegacyPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py index ddb9b3358a84..5b9daec04d30 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -32,7 +32,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusionUpscalePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class StableDiffusionUpscalePipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py index bbe4f4943697..67b302b6910d 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -38,7 +38,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class StableDiffusion2VPredictionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py index dbb991479377..1af2ad5c244e 100644 --- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py +++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py @@ -33,7 +33,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class SafeDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class SafeDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py index 9fb6ca522f5d..51b05be53b5c 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py @@ -29,7 +29,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionDualGuidedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionDualGuidedPipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py index 1711b752992f..7ec5f2b46f8e 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py @@ -27,7 +27,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py index 31085aeb614d..001e5bf08dcd 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py @@ -29,7 +29,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionMegaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py index 027819efee9f..07ee407a5327 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py @@ -29,7 +29,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class VersatileDiffusionTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase): pass diff --git a/tests/pipelines/vq_diffusion/test_vq_diffusion.py b/tests/pipelines/vq_diffusion/test_vq_diffusion.py index 87e29cbc97de..167371c3d6b1 100644 --- a/tests/pipelines/vq_diffusion/test_vq_diffusion.py +++ b/tests/pipelines/vq_diffusion/test_vq_diffusion.py @@ -31,7 +31,7 @@ torch.backends.cuda.matmul.allow_tf32 = False -class VQDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): +class VQDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() From 806af670154408d3b403a7fa611c504b84011239 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 16:25:36 +0100 Subject: [PATCH 17/24] quality --- tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py | 2 -- tests/pipelines/ddpm/test_ddpm.py | 2 -- tests/pipelines/karras_ve/test_karras_ve.py | 2 -- tests/pipelines/latent_diffusion/test_latent_diffusion.py | 2 -- .../latent_diffusion/test_latent_diffusion_superresolution.py | 2 -- .../pipelines/latent_diffusion/test_latent_diffusion_uncond.py | 2 -- tests/pipelines/pndm/test_pndm.py | 2 -- tests/pipelines/score_sde_ve/test_score_sde_ve.py | 2 -- .../stable_diffusion/test_stable_diffusion_inpaint_legacy.py | 2 -- .../stable_diffusion_2/test_stable_diffusion_upscale.py | 2 -- .../stable_diffusion_2/test_stable_diffusion_v_pred.py | 2 -- tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py | 2 -- .../versatile_diffusion/test_versatile_diffusion_dual_guided.py | 2 -- .../test_versatile_diffusion_image_variation.py | 2 -- .../versatile_diffusion/test_versatile_diffusion_mega.py | 2 -- .../test_versatile_diffusion_text_to_image.py | 2 -- tests/pipelines/vq_diffusion/test_vq_diffusion.py | 2 -- 17 files changed, 34 deletions(-) diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py index 9da51e328c75..761b2c013401 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py @@ -29,8 +29,6 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import XLMRobertaTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/ddpm/test_ddpm.py b/tests/pipelines/ddpm/test_ddpm.py index 38f15baa3c1c..7d51cb37e064 100644 --- a/tests/pipelines/ddpm/test_ddpm.py +++ b/tests/pipelines/ddpm/test_ddpm.py @@ -22,8 +22,6 @@ from diffusers.utils import deprecate from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/karras_ve/test_karras_ve.py b/tests/pipelines/karras_ve/test_karras_ve.py index 962249d34f7f..9806e8bf373e 100644 --- a/tests/pipelines/karras_ve/test_karras_ve.py +++ b/tests/pipelines/karras_ve/test_karras_ve.py @@ -21,8 +21,6 @@ from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel from diffusers.utils.testing_utils import require_torch, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py index 1b0c73b7cc23..ad8d77807294 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py @@ -22,8 +22,6 @@ from diffusers.utils.testing_utils import require_torch, slow, torch_device from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py index 6d2615b533b9..89356f85728f 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -23,8 +23,6 @@ from diffusers.utils import PIL_INTERPOLATION, floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import require_torch -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py index 649f68503995..39ad12254e09 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py @@ -22,8 +22,6 @@ from diffusers.utils.testing_utils import require_torch, slow, torch_device from transformers import CLIPTextConfig, CLIPTextModel -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/pndm/test_pndm.py b/tests/pipelines/pndm/test_pndm.py index 355f4b390066..8851607ccc07 100644 --- a/tests/pipelines/pndm/test_pndm.py +++ b/tests/pipelines/pndm/test_pndm.py @@ -21,8 +21,6 @@ from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel from diffusers.utils.testing_utils import require_torch, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/score_sde_ve/test_score_sde_ve.py b/tests/pipelines/score_sde_ve/test_score_sde_ve.py index 7deb4f882015..4379bd146f9b 100644 --- a/tests/pipelines/score_sde_ve/test_score_sde_ve.py +++ b/tests/pipelines/score_sde_ve/test_score_sde_ve.py @@ -21,8 +21,6 @@ from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel from diffusers.utils.testing_utils import require_torch, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py index 3de8dec0e11b..9207fa7fd4e4 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py @@ -35,8 +35,6 @@ from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py index 5b9daec04d30..0eb43b570b6b 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -26,8 +26,6 @@ from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py index 67b302b6910d..5fe973b2ac14 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -32,8 +32,6 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py index 1af2ad5c244e..aa143bd63b96 100644 --- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py +++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py @@ -27,8 +27,6 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py index 51b05be53b5c..b8595378a58a 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py @@ -23,8 +23,6 @@ from diffusers import VersatileDiffusionDualGuidedPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py index 7ec5f2b46f8e..4c8bef0e71b3 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_image_variation.py @@ -21,8 +21,6 @@ from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py index 001e5bf08dcd..3670894ed7d6 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py @@ -23,8 +23,6 @@ from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py index 07ee407a5327..44514da0108b 100644 --- a/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py +++ b/tests/pipelines/versatile_diffusion/test_versatile_diffusion_text_to_image.py @@ -23,8 +23,6 @@ from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/vq_diffusion/test_vq_diffusion.py b/tests/pipelines/vq_diffusion/test_vq_diffusion.py index 167371c3d6b1..d992a5215298 100644 --- a/tests/pipelines/vq_diffusion/test_vq_diffusion.py +++ b/tests/pipelines/vq_diffusion/test_vq_diffusion.py @@ -25,8 +25,6 @@ from diffusers.utils.testing_utils import require_torch_gpu from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer -from ...test_pipelines_common import PipelineTesterMixin - torch.backends.cuda.matmul.allow_tf32 = False From a222de7a712594267a4e89130b8cbd7b4355ac68 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 16:32:13 +0100 Subject: [PATCH 18/24] add missing inits --- tests/pipelines/audio_diffusion/__init__.py | 0 tests/pipelines/dance_diffusion/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/pipelines/audio_diffusion/__init__.py create mode 100644 tests/pipelines/dance_diffusion/__init__.py diff --git a/tests/pipelines/audio_diffusion/__init__.py b/tests/pipelines/audio_diffusion/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/pipelines/dance_diffusion/__init__.py b/tests/pipelines/dance_diffusion/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From 8789df6c2b952b745d9ff6da1a8b7ccc05664250 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 16:42:30 +0100 Subject: [PATCH 19/24] try to fix mps tests --- tests/pipelines/altdiffusion/test_alt_diffusion.py | 6 +++++- tests/pipelines/dance_diffusion/test_dance_diffusion.py | 5 ++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py index 508fc16a9c59..99406d85c16b 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion.py @@ -109,9 +109,13 @@ def get_dummy_components(self): return components def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", - "generator": torch.Generator(device=device).manual_seed(seed), + "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index c7e1b1c4d1d1..5b96e7f17508 100644 --- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py @@ -59,7 +59,10 @@ def get_dummy_components(self): return components def get_dummy_inputs(self, device, seed=0): - generator = torch.Generator(device=device).manual_seed(seed) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "generator": generator, "num_inference_steps": 4, From 24400cf7fe7ae569f62001ee2fef47a35d69a31e Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 16:51:14 +0100 Subject: [PATCH 20/24] fix mps tests --- .../dance_diffusion/test_dance_diffusion.py | 12 ++++++++++++ tests/pipelines/ddim/test_ddim.py | 6 +++++- .../stable_diffusion/test_cycle_diffusion.py | 5 ++++- .../stable_diffusion/test_stable_diffusion.py | 6 +++++- .../test_stable_diffusion_image_variation.py | 5 ++++- .../test_stable_diffusion_img2img.py | 5 ++++- .../test_stable_diffusion_inpaint.py | 5 ++++- .../stable_diffusion_2/test_stable_diffusion.py | 6 +++++- .../test_stable_diffusion_inpaint.py | 5 ++++- 9 files changed, 47 insertions(+), 8 deletions(-) diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index 5b96e7f17508..2c99acaee94f 100644 --- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py @@ -69,6 +69,18 @@ def get_dummy_inputs(self, device, seed=0): } return inputs + def test_dict_tuple_outputs_equivalent(self): + # FIXME: this test fails with MPS + pass + + def test_save_load_local(self): + # FIXME: this test fails with MPS + pass + + def test_save_load_optional_components(self): + # FIXME: this test fails with MPS + pass + def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() diff --git a/tests/pipelines/ddim/test_ddim.py b/tests/pipelines/ddim/test_ddim.py index 316ea86e96bc..26ea10caeabd 100644 --- a/tests/pipelines/ddim/test_ddim.py +++ b/tests/pipelines/ddim/test_ddim.py @@ -46,8 +46,12 @@ def get_dummy_components(self): return components def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { - "generator": torch.Generator(device=device).manual_seed(seed), + "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } diff --git a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py index 525df1d72328..ae0a13936603 100644 --- a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py @@ -91,7 +91,10 @@ def get_dummy_components(self): def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) - generator = torch.Generator(device=device).manual_seed(seed) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index 2c544472efb2..4748a41feae7 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -100,9 +100,13 @@ def get_dummy_components(self): return components def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", - "generator": torch.Generator(device=device).manual_seed(seed), + "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py index eb6824432fcd..4be95322d6b6 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py @@ -87,7 +87,10 @@ def get_dummy_components(self): def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) - generator = torch.Generator(device=device).manual_seed(seed) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "generator": generator, diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py index 3dfa58f3fdb7..7ce06403fa05 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py @@ -91,7 +91,10 @@ def get_dummy_components(self): def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) - generator = torch.Generator(device=device).manual_seed(seed) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py index 935bfa6a2fdc..f331209e64f4 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py @@ -96,7 +96,10 @@ def get_dummy_inputs(self, device, seed=0): image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - generator = torch.Generator(device=device).manual_seed(seed) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py index 03660c507f1a..5f8644754204 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py @@ -105,9 +105,13 @@ def get_dummy_components(self): return components def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", - "generator": torch.Generator(device=device).manual_seed(seed), + "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py index b2b508fa80ca..b2d387cb6890 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py @@ -96,7 +96,10 @@ def get_dummy_inputs(self, device, seed=0): image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) - generator = torch.Generator(device=device).manual_seed(seed) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, From efaf2fed2a1fc85dab8e36cbec793d2c99209d4f Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 17:00:57 +0100 Subject: [PATCH 21/24] add mps warmups --- .../dance_diffusion/test_dance_diffusion.py | 12 ----------- tests/test_pipelines_common.py | 20 +++++++++++++++++++ 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/tests/pipelines/dance_diffusion/test_dance_diffusion.py b/tests/pipelines/dance_diffusion/test_dance_diffusion.py index 2c99acaee94f..5b96e7f17508 100644 --- a/tests/pipelines/dance_diffusion/test_dance_diffusion.py +++ b/tests/pipelines/dance_diffusion/test_dance_diffusion.py @@ -69,18 +69,6 @@ def get_dummy_inputs(self, device, seed=0): } return inputs - def test_dict_tuple_outputs_equivalent(self): - # FIXME: this test fails with MPS - pass - - def test_save_load_local(self): - # FIXME: this test fails with MPS - pass - - def test_save_load_optional_components(self): - # FIXME: this test fails with MPS - pass - def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 968817b05acf..c99f75d3b7cb 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -63,6 +63,10 @@ def test_save_load_local(self): pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) + # Warmup pass when using mps (see #372) + if torch_device == "mps": + _ = pipe(**self.get_dummy_inputs(torch_device)) + inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] @@ -84,6 +88,10 @@ def test_dict_tuple_outputs_equivalent(self): pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) + # Warmup pass when using mps (see #372) + if torch_device == "mps": + _ = pipe(**self.get_dummy_inputs(torch_device)) + output = pipe(**self.get_dummy_inputs(torch_device))[0] output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] @@ -102,6 +110,10 @@ def test_num_inference_steps_consistent(self): pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) + # Warmup pass when using mps (see #372) + if torch_device == "mps": + _ = pipe(**self.get_dummy_inputs(torch_device)) + outputs = [] times = [] for num_steps in [3, 6, 9]: @@ -188,6 +200,10 @@ def test_save_load_optional_components(self): pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) + # Warmup pass when using mps (see #372) + if torch_device == "mps": + _ = pipe(**self.get_dummy_inputs(torch_device)) + # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) @@ -242,6 +258,10 @@ def test_attention_slicing_forward_pass(self): pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) + # Warmup pass when using mps (see #372) + if torch_device == "mps": + _ = pipe(**self.get_dummy_inputs(torch_device)) + inputs = self.get_dummy_inputs(torch_device) output_without_slicing = pipe(**inputs)[0] From 2799c5501833962c7864ef4624d310a0b92710f7 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 17:12:26 +0100 Subject: [PATCH 22/24] skip for some pipelines --- tests/test_pipelines_common.py | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index c99f75d3b7cb..e4a6d9ca18a6 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -11,7 +11,7 @@ import numpy as np import torch -from diffusers import DiffusionPipeline +from diffusers import DiffusionPipeline, DanceDiffusionPipeline, CycleDiffusionPipeline, StableDiffusionImg2ImgPipeline from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available from diffusers.utils.testing_utils import require_torch, torch_device @@ -58,6 +58,14 @@ def tearDown(self): torch.cuda.empty_cache() def test_save_load_local(self): + if torch_device == "mps" and self.pipeline_class in ( + DanceDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionImg2ImgPipeline, + ): + # FIXME: inconsistent outputs on MPS + return + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) @@ -83,6 +91,14 @@ def test_save_load_local(self): self.assertLess(max_diff, 1e-5) def test_dict_tuple_outputs_equivalent(self): + if torch_device == "mps" and self.pipeline_class in ( + DanceDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionImg2ImgPipeline, + ): + # FIXME: inconsistent outputs on MPS + return + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) @@ -195,6 +211,14 @@ def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return + if torch_device == "mps" and self.pipeline_class in ( + DanceDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionImg2ImgPipeline, + ): + # FIXME: inconsistent outputs on MPS + return + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) @@ -253,6 +277,14 @@ def test_attention_slicing_forward_pass(self): if not self.test_attention_slicing: return + if torch_device == "mps" and self.pipeline_class in ( + DanceDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionImg2ImgPipeline, + ): + # FIXME: inconsistent outputs on MPS + return + components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) From ee38f96c622187b49618f6a0083513d53438ca15 Mon Sep 17 00:00:00 2001 From: anton- Date: Tue, 6 Dec 2022 17:14:19 +0100 Subject: [PATCH 23/24] style --- tests/test_pipelines_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index e4a6d9ca18a6..7ed8e9ba4aee 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -11,7 +11,7 @@ import numpy as np import torch -from diffusers import DiffusionPipeline, DanceDiffusionPipeline, CycleDiffusionPipeline, StableDiffusionImg2ImgPipeline +from diffusers import CycleDiffusionPipeline, DanceDiffusionPipeline, DiffusionPipeline, StableDiffusionImg2ImgPipeline from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available from diffusers.utils.testing_utils import require_torch, torch_device From 080084443b85c6e64bb1c3aab9a3d20dc66a3fdc Mon Sep 17 00:00:00 2001 From: Anton Lozhkov Date: Tue, 6 Dec 2022 18:16:08 +0100 Subject: [PATCH 24/24] Update tests/test_pipelines_common.py Co-authored-by: Patrick von Platen --- tests/test_pipelines_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py index 7ed8e9ba4aee..f18c939e1504 100644 --- a/tests/test_pipelines_common.py +++ b/tests/test_pipelines_common.py @@ -306,7 +306,7 @@ def test_attention_slicing_forward_pass(self): @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available(), - reason="XFormers attention is only available with CUDA and `accelerate` installed", + reason="CPU offload is only available with CUDA and `accelerate` installed", ) def test_cpu_offload_forward_pass(self): if not self.test_cpu_offload: