From ac5b1aac43e14d72916530bab2282299a5b9f40e Mon Sep 17 00:00:00 2001
From: Daniel Berry <2730079+yafa11@users.noreply.github.com>
Date: Fri, 11 Nov 2022 17:29:08 -0700
Subject: [PATCH 1/2] Update ddim.py

Update ddim sampler to support CPU if Cuda is not available
---
 ldm/models/diffusion/ddim.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py
index fb31215db..96f49e0a3 100644
--- a/ldm/models/diffusion/ddim.py
+++ b/ldm/models/diffusion/ddim.py
@@ -18,8 +18,10 @@ def __init__(self, model, schedule="linear", **kwargs):
 
     def register_buffer(self, name, attr):
         if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
+            if attr.device != torch.device("cuda") and torch.cuda.is_available():
                 attr = attr.to(torch.device("cuda"))
+            else:
+                attr = attr.to(torch.device("cpu"))
         setattr(self, name, attr)
 
     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
@@ -238,4 +240,4 @@ def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unco
         x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
                                       unconditional_guidance_scale=unconditional_guidance_scale,
                                       unconditional_conditioning=unconditional_conditioning)
-        return x_dec
\ No newline at end of file
+        return x_dec

From 7fb37e4fd396ce2c69d61d832c4dc88b0bc76122 Mon Sep 17 00:00:00 2001
From: Daniel Berry <2730079+yafa11@users.noreply.github.com>
Date: Fri, 11 Nov 2022 17:29:46 -0700
Subject: [PATCH 2/2] Update plms.py

Update plms sampler to support CPU if Cuda is not available
---
 ldm/models/diffusion/plms.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py
index 78eeb1003..63a6d46c5 100644
--- a/ldm/models/diffusion/plms.py
+++ b/ldm/models/diffusion/plms.py
@@ -17,8 +17,10 @@ def __init__(self, model, schedule="linear", **kwargs):
 
     def register_buffer(self, name, attr):
         if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
+            if attr.device != torch.device("cuda") and torch.cuda.is_available():
                 attr = attr.to(torch.device("cuda"))
+            else:
+                attr = attr.to(torch.device("cpu"))
         setattr(self, name, attr)
 
     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
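
Both hunks apply the same device-selection pattern to register_buffer: move tensors to the GPU only when torch.cuda.is_available() reports a usable device, otherwise keep them on the CPU. The following is a minimal standalone sketch of that behavior for reference; the BufferHolder class and the "betas" tensor are illustrative stand-ins, not part of the patch.

import torch

class BufferHolder:
    # Illustrative stand-in for the samplers' register_buffer logic.
    def register_buffer(self, name, attr):
        # Mirror the patched logic: send tensors to CUDA only when a GPU
        # is actually available, otherwise place them on the CPU.
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda") and torch.cuda.is_available():
                attr = attr.to(torch.device("cuda"))
            else:
                attr = attr.to(torch.device("cpu"))
        setattr(self, name, attr)

# Usage sketch: on a machine without CUDA the buffer ends up on the CPU
# instead of raising an error when .to("cuda") is attempted.
holder = BufferHolder()
holder.register_buffer("betas", torch.linspace(1e-4, 2e-2, 1000))
print(holder.betas.device)  # "cpu" when torch.cuda.is_available() is False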