From b5dfa06dfa4f9011bed27614d8becfca659525d3 Mon Sep 17 00:00:00 2001
From: Qing
Date: Thu, 30 Mar 2023 21:16:55 +0800
Subject: [PATCH] fix sd local model

---
 lama_cleaner/model/sd.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/lama_cleaner/model/sd.py b/lama_cleaner/model/sd.py
index 9117dd6..e1ecece 100644
--- a/lama_cleaner/model/sd.py
+++ b/lama_cleaner/model/sd.py
@@ -33,7 +33,7 @@ class CPUTextEncoderWrapper:
         return self.torch_dtype
 
 
-def load_from_local_model(local_model_path, torch_dtype, disable_nsfw):
+def load_from_local_model(local_model_path, torch_dtype, disable_nsfw=True):
     from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
         load_pipeline_from_original_stable_diffusion_ckpt,
     )
@@ -76,7 +76,6 @@ class SD(DiffusionInpaintModel):
         model_kwargs = {
             "local_files_only": kwargs.get("local_files_only", kwargs["sd_run_local"])
         }
-        disable_nsfw = False
         if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
             logger.info("Disable Stable Diffusion Model NSFW checker")
             model_kwargs.update(
@@ -86,7 +85,6 @@
                     requires_safety_checker=False,
                 )
             )
-            disable_nsfw = True
 
         use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
         torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
@@ -95,7 +93,6 @@
             self.model = load_from_local_model(
                 kwargs["sd_local_model_path"],
                 torch_dtype=torch_dtype,
-                disable_nsfw=disable_nsfw,
             )
         else:
             self.model = StableDiffusionInpaintPipeline.from_pretrained(