From a361852d22ee087893f6abe7d64e8a61e0694728 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Fri, 9 Dec 2022 17:02:43 +0000
Subject: [PATCH 1/2] do not automatically enable xformers

---
 src/diffusers/models/attention.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/src/diffusers/models/attention.py b/src/diffusers/models/attention.py
index 8b855a5ed5f4..99dd5d8d5150 100644
--- a/src/diffusers/models/attention.py
+++ b/src/diffusers/models/attention.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import math
-import warnings
 from dataclasses import dataclass
 from typing import Optional
 
@@ -447,16 +446,6 @@ def __init__(
         # 3. Feed-forward
         self.norm3 = nn.LayerNorm(dim)
 
-        # if xformers is installed try to use memory_efficient_attention by default
-        if is_xformers_available():
-            try:
-                self.set_use_memory_efficient_attention_xformers(True)
-            except Exception as e:
-                warnings.warn(
-                    "Could not enable memory efficient attention. Make sure xformers is installed"
-                    f" correctly and a GPU is available: {e}"
-                )
-
     def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool):
         if not is_xformers_available():
             print("Here is how to install it")

From c8a80a220a6da4fc87d64361b5f839da7780de39 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Fri, 9 Dec 2022 17:21:35 +0000
Subject: [PATCH 2/2] uP

---
 examples/dreambooth/train_dreambooth.py         | 10 ++++++++++
 examples/text_to_image/train_text_to_image.py   | 10 ++++++++++
 examples/textual_inversion/textual_inversion.py | 10 ++++++++++
 3 files changed, 30 insertions(+)

diff --git a/examples/dreambooth/train_dreambooth.py b/examples/dreambooth/train_dreambooth.py
index b904920f1cd4..c76ff7c632e2 100644
--- a/examples/dreambooth/train_dreambooth.py
+++ b/examples/dreambooth/train_dreambooth.py
@@ -17,6 +17,7 @@
 from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
 from huggingface_hub import HfFolder, Repository, whoami
 from PIL import Image
 from torchvision import transforms
@@ -488,6 +489,15 @@ def main(args):
         revision=args.revision,
     )
 
+    if is_xformers_available():
+        try:
+            unet.enable_xformers_memory_efficient_attention(True)
+        except Exception as e:
+            logger.warning(
+                "Could not enable memory efficient attention. Make sure xformers is installed"
+                f" correctly and a GPU is available: {e}"
+            )
+
     vae.requires_grad_(False)
     if not args.train_text_encoder:
         text_encoder.requires_grad_(False)
diff --git a/examples/text_to_image/train_text_to_image.py b/examples/text_to_image/train_text_to_image.py
index 96016f4cbe10..5135f3b98948 100644
--- a/examples/text_to_image/train_text_to_image.py
+++ b/examples/text_to_image/train_text_to_image.py
@@ -18,6 +18,7 @@
 from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
 from huggingface_hub import HfFolder, Repository, whoami
 from torchvision import transforms
 from tqdm.auto import tqdm
@@ -364,6 +365,15 @@ def main():
         revision=args.revision,
     )
 
+    if is_xformers_available():
+        try:
+            unet.enable_xformers_memory_efficient_attention(True)
+        except Exception as e:
+            logger.warning(
+                "Could not enable memory efficient attention. Make sure xformers is installed"
+                f" correctly and a GPU is available: {e}"
+            )
+
     # Freeze vae and text_encoder
     vae.requires_grad_(False)
     text_encoder.requires_grad_(False)
diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py
index 5b922602f349..90619dc7d886 100644
--- a/examples/textual_inversion/textual_inversion.py
+++ b/examples/textual_inversion/textual_inversion.py
@@ -20,6 +20,7 @@
 from diffusers.optimization import get_scheduler
 from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
 from diffusers.utils import check_min_version
+from diffusers.utils.import_utils import is_xformers_available
 from huggingface_hub import HfFolder, Repository, whoami
 
 # TODO: remove and import from diffusers.utils when the new version of diffusers is released
@@ -439,6 +440,15 @@ def main():
         revision=args.revision,
     )
 
+    if is_xformers_available():
+        try:
+            unet.enable_xformers_memory_efficient_attention(True)
+        except Exception as e:
+            logger.warning(
+                "Could not enable memory efficient attention. Make sure xformers is installed"
+                f" correctly and a GPU is available: {e}"
+            )
+
     # Resize the token embeddings as we are adding new special tokens to the tokenizer
     text_encoder.resize_token_embeddings(len(tokenizer))