diff --git a/docs/source/en/optimization/fp16.mdx b/docs/source/en/optimization/fp16.mdx
index c18cefbde6a9..fd01cda9d696 100644
--- a/docs/source/en/optimization/fp16.mdx
+++ b/docs/source/en/optimization/fp16.mdx
@@ -58,7 +58,10 @@ torch.backends.cuda.matmul.allow_tf32 = True
 To save more GPU memory and get more speed, you can load and run the model weights directly in half precision. This involves loading the float16 version of the weights, which was saved to a branch named `fp16`, and telling PyTorch to use the `float16` type when loading them:
 
 ```Python
-pipe = StableDiffusionPipeline.from_pretrained(
+import torch
+from diffusers import DiffusionPipeline
+
+pipe = DiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
     torch_dtype=torch.float16,
 
@@ -85,13 +88,13 @@ For even additional memory savings, you can use a sliced version of attention th
 each head which can save a significant amount of memory.
 
-To perform the attention computation sequentially over each head, you only need to invoke [`~StableDiffusionPipeline.enable_attention_slicing`] in your pipeline before inference, like here:
+To perform the attention computation sequentially over each head, you only need to invoke [`~DiffusionPipeline.enable_attention_slicing`] in your pipeline before inference, like here:
 
 ```Python
 import torch
-from diffusers import StableDiffusionPipeline
+from diffusers import DiffusionPipeline
 
-pipe = StableDiffusionPipeline.from_pretrained(
+pipe = DiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
     torch_dtype=torch.float16,
@@ -415,10 +418,10 @@ To leverage it just make sure you have:
 - Cuda available
 - [Installed the xformers library](xformers).
 ```python
-from diffusers import StableDiffusionPipeline
+from diffusers import DiffusionPipeline
 import torch
 
-pipe = StableDiffusionPipeline.from_pretrained(
+pipe = DiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
     torch_dtype=torch.float16,
 ).to("cuda")
diff --git a/docs/source/en/optimization/mps.mdx b/docs/source/en/optimization/mps.mdx
index 3750724bce57..3be8c621ee3e 100644
--- a/docs/source/en/optimization/mps.mdx
+++ b/docs/source/en/optimization/mps.mdx
@@ -35,9 +35,9 @@ The snippet below demonstrates how to use the `mps` backend using the familiar `
 We strongly recommend you use PyTorch 2 or better, as it solves a number of problems like the one described in the previous tip.
 
 ```python
-from diffusers import StableDiffusionPipeline
+from diffusers import DiffusionPipeline
 
-pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
 pipe = pipe.to("mps")
 
 # Recommended if your computer has < 64 GB of RAM
diff --git a/docs/source/en/optimization/torch2.0.mdx b/docs/source/en/optimization/torch2.0.mdx
index a6a40469e97b..206ac4e447cc 100644
--- a/docs/source/en/optimization/torch2.0.mdx
+++ b/docs/source/en/optimization/torch2.0.mdx
@@ -35,9 +35,9 @@ pip install --upgrade torch torchvision diffusers
 
    ```Python
    import torch
-   from diffusers import StableDiffusionPipeline
+   from diffusers import DiffusionPipeline
 
-   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
 
    prompt = "a photo of an astronaut riding a horse on mars"
@@ -48,10 +48,10 @@ pip install --upgrade torch torchvision diffusers
 
    ```Python
    import torch
-   from diffusers import StableDiffusionPipeline
+   from diffusers import DiffusionPipeline
    from diffusers.models.attention_processor import AttnProcessor2_0
 
-   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
+   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
    pipe.unet.set_attn_processor(AttnProcessor2_0())
 
    prompt = "a photo of an astronaut riding a horse on mars"
@@ -68,11 +68,9 @@ pip install --upgrade torch torchvision diffusers
 
    ```python
    import torch
-   from diffusers import StableDiffusionPipeline
+   from diffusers import DiffusionPipeline
 
-   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to(
-       "cuda"
-   )
+   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
    pipe.unet = torch.compile(pipe.unet)
 
    batch_size = 10
diff --git a/docs/source/en/quicktour.mdx b/docs/source/en/quicktour.mdx
index 3aecb422af2a..d494b79dccd5 100644
--- a/docs/source/en/quicktour.mdx
+++ b/docs/source/en/quicktour.mdx
@@ -141,7 +141,7 @@ Different schedulers come with different denoising speeds and quality trade-offs
 ```py
 >>> from diffusers import EulerDiscreteScheduler
 
->>> pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
 >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
 ```
diff --git a/docs/source/en/stable_diffusion.mdx b/docs/source/en/stable_diffusion.mdx
index 8190813e488a..c1eef6fa3c5c 100644
--- a/docs/source/en/stable_diffusion.mdx
+++ b/docs/source/en/stable_diffusion.mdx
@@ -47,9 +47,9 @@ Let's load the pipeline.
 ## Speed Optimization
 
 ``` python
-from diffusers import StableDiffusionPipeline
+from diffusers import DiffusionPipeline
 
-pipe = StableDiffusionPipeline.from_pretrained(model_id)
+pipe = DiffusionPipeline.from_pretrained(model_id)
 ```
 
 We aim at generating a beautiful photograph of an *old warrior chief* and will later try to find the best prompt to generate such a photograph.
 For now, let's keep the prompt simple:
@@ -88,7 +88,7 @@ The default run we did above used full float32 precision and ran the default num
 ``` python
 import torch
 
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
 pipe = pipe.to("cuda")
 ```
diff --git a/docs/source/en/training/dreambooth.mdx b/docs/source/en/training/dreambooth.mdx
index 623b9124f303..2f694d21a349 100644
--- a/docs/source/en/training/dreambooth.mdx
+++ b/docs/source/en/training/dreambooth.mdx
@@ -457,11 +457,11 @@ If you have **`"accelerate>=0.16.0"`** installed, you can use the following code
 inference from an intermediate checkpoint:
 
 ```python
-from diffusers import StableDiffusionPipeline
+from diffusers import DiffusionPipeline
 import torch
 
 model_id = "path_to_saved_model"
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
+pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
 
 prompt = "A photo of sks dog in a bucket"
 image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
diff --git a/docs/source/en/using-diffusers/using_safetensors.mdx b/docs/source/en/using-diffusers/using_safetensors.mdx
index 50bcb6b9933b..b522f3236fbb 100644
--- a/docs/source/en/using-diffusers/using_safetensors.mdx
+++ b/docs/source/en/using-diffusers/using_safetensors.mdx
@@ -75,9 +75,9 @@ And we're equipped with dealing with it.
 Then in order to use the model, even before the branch gets accepted by the original author you can do:
 
 ```python
-from diffusers import StableDiffusionPipeline
+from diffusers import DiffusionPipeline
 
-pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", revision="refs/pr/22")
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", revision="refs/pr/22")
 ```
 
 or you can test it directly online with this [space](https://huggingface.co/spaces/diffusers/check_pr).
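All of the hunks above converge on one pattern: `DiffusionPipeline` is the generic entry point that reads a checkpoint's `model_index.json` and dispatches to the concrete pipeline class, so it can replace `StableDiffusionPipeline` throughout the docs without changing behavior. Below is a minimal end-to-end sketch of that pattern, assuming a CUDA machine and a `diffusers` version with this auto-dispatch; the model id and prompt are the ones already used in the diff.

```python
import torch
from diffusers import DiffusionPipeline

# DiffusionPipeline.from_pretrained inspects the checkpoint's model_index.json
# and instantiates the matching pipeline class, so no pipeline-specific import
# is needed.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,  # half precision: less memory, faster on GPU
)
pipe = pipe.to("cuda")  # or "mps" on Apple silicon, per the mps.mdx hunk

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
```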