From 34d0327ff0d26f897c797247e670871204630e19 Mon Sep 17 00:00:00 2001
From: Qing
Date: Thu, 3 Nov 2022 20:46:58 +0800
Subject: [PATCH] sd1.5: fix a bug when running --device=cpu on a host that has a GPU

---
 lama_cleaner/model/sd.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/lama_cleaner/model/sd.py b/lama_cleaner/model/sd.py
index afd4eb8..bcd3c0f 100644
--- a/lama_cleaner/model/sd.py
+++ b/lama_cleaner/model/sd.py
@@ -47,7 +47,7 @@ class CPUTextEncoderWrapper:
 
 
 class SD(InpaintModel):
-    pad_mod = 8  # current diffusers only support 64 https://github.com/huggingface/diffusers/pull/505
+    pad_mod = 8
     min_size = 512
 
     def init_model(self, device: torch.device, **kwargs):
@@ -60,10 +60,12 @@ class SD(InpaintModel):
                 safety_checker=None,
             ))
 
+        use_gpu = device == torch.device('cuda') and torch.cuda.is_available()
+
         self.model = StableDiffusionInpaintPipeline.from_pretrained(
             self.model_id_or_path,
-            revision="fp16" if torch.cuda.is_available() else "main",
-            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+            revision="fp16" if use_gpu else "main",
+            torch_dtype=torch.float16 if use_gpu else torch.float32,
             use_auth_token=kwargs["hf_access_token"],
             **model_kwargs
         )
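
The gist of the change, as a minimal standalone sketch (the pick_revision_and_dtype helper below is hypothetical, not part of lama_cleaner): torch.cuda.is_available() returns True on any host with a working GPU even when the user passes --device=cpu, so the old code selected the fp16 revision and torch.float16 for a CPU run. Gating on the requested device as well keeps CPU runs on the "main" revision and float32.

    import torch

    def pick_revision_and_dtype(device: torch.device):
        # Hypothetical helper mirroring the patched logic: use fp16 weights
        # only when the caller asked for CUDA *and* CUDA is actually available.
        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
        revision = "fp16" if use_gpu else "main"
        torch_dtype = torch.float16 if use_gpu else torch.float32
        return revision, torch_dtype

    # On a host with a GPU:
    #   pick_revision_and_dtype(torch.device("cuda"))  -> ("fp16", torch.float16)
    #   pick_revision_and_dtype(torch.device("cpu"))   -> ("main", torch.float32)
    # Before the patch, the second call also picked fp16, because only
    # torch.cuda.is_available() was checked.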