use from_single_file

Qing 2023-11-16 14:09:08 +08:00
parent bfd33ef37f
commit 20e660aa4a
2 changed files with 12 additions and 37 deletions

@@ -34,37 +34,6 @@ class CPUTextEncoderWrapper(torch.nn.Module):
         return self.torch_dtype
 
-
-def load_from_local_model(local_model_path, torch_dtype, disable_nsfw=True):
-    from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
-        download_from_original_stable_diffusion_ckpt,
-    )
-    from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
-
-    logger.info(f"Converting {local_model_path} to diffusers pipeline")
-
-    pipe = download_from_original_stable_diffusion_ckpt(
-        local_model_path,
-        num_in_channels=9,
-        from_safetensors=local_model_path.endswith("safetensors"),
-        device="cpu",
-    )
-
-    inpaint_pipe = StableDiffusionInpaintPipeline(
-        vae=pipe.vae,
-        text_encoder=pipe.text_encoder,
-        tokenizer=pipe.tokenizer,
-        unet=pipe.unet,
-        scheduler=pipe.scheduler,
-        safety_checker=None if disable_nsfw else pipe.safety_checker,
-        feature_extractor=None if disable_nsfw else pipe.feature_extractor,
-        requires_safety_checker=not disable_nsfw,
-    )
-
-    del pipe
-    gc.collect()
-    return inpaint_pipe.to(torch_dtype=torch_dtype)
-
 class SD(DiffusionInpaintModel):
     pad_mod = 8
     min_size = 512
@@ -92,9 +61,8 @@ class SD(DiffusionInpaintModel):
         torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
 
         if kwargs.get("sd_local_model_path", None):
-            self.model = load_from_local_model(
-                kwargs["sd_local_model_path"],
-                torch_dtype=torch_dtype,
-            )
+            self.model = StableDiffusionInpaintPipeline.from_single_file(
+                kwargs["sd_local_model_path"], torch_dtype=torch_dtype, **model_kwargs
+            )
         else:
             self.model = StableDiffusionInpaintPipeline.from_pretrained(
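For context: the deleted load_from_local_model helper converted the checkpoint with download_from_original_stable_diffusion_ckpt and then reassembled a StableDiffusionInpaintPipeline by hand; from_single_file collapses that into one call and handles both .ckpt and .safetensors checkpoints. A minimal sketch of the new loading path (the path, dtype, and device below are illustrative placeholders, not values from this repo):

    import torch
    from diffusers import StableDiffusionInpaintPipeline

    # Works for both .ckpt and .safetensors single-file checkpoints;
    # diffusers infers the pipeline config from the checkpoint, so no
    # manual num_in_channels / from_safetensors bookkeeping is needed.
    pipe = StableDiffusionInpaintPipeline.from_single_file(
        "./models/sd-v1-5-inpainting.safetensors",
        torch_dtype=torch.float16,
    )
    pipe = pipe.to("cuda")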


@@ -240,7 +240,14 @@ def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
 
 
 @pytest.mark.parametrize("sd_device", ["cuda", "mps"])
 @pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
-def test_local_file_path(sd_device, sampler):
+@pytest.mark.parametrize(
+    "local_model_path",
+    [
+        "/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
+        "/Users/cwq/data/models/sd-v1-5-inpainting.safetensors",
+    ],
+)
+def test_local_file_path(sd_device, sampler, local_model_path):
     if sd_device == "cuda" and not torch.cuda.is_available():
         return
@@ -253,7 +260,7 @@ def test_local_file_path(sd_device, sampler):
         disable_nsfw=True,
         sd_cpu_textencoder=False,
         cpu_offload=True,
-        sd_local_model_path="/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
+        sd_local_model_path=local_model_path,
     )
     cfg = get_config(
         HDStrategy.ORIGINAL,
@@ -262,7 +269,7 @@ def test_local_file_path(sd_device, sampler):
     )
     cfg.sd_sampler = sampler
-    name = f"device_{sd_device}_{sampler}"
+    name = f"device_{sd_device}_{sampler}_{Path(local_model_path).stem}"
     assert_equal(
         model,
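The stacked pytest.mark.parametrize decorators take the cross product of device, sampler, and model path, so the one test body now covers both checkpoint formats on each device, and each generated case gets its own readable id (which the _{Path(local_model_path).stem} suffix also bakes into the result name). A standalone sketch of the same pattern (names and paths here are illustrative, not from the repo):

    import pytest

    @pytest.mark.parametrize("device", ["cuda", "mps"])
    @pytest.mark.parametrize(
        "model_path",
        ["sd-v1-5-inpainting.ckpt", "sd-v1-5-inpainting.safetensors"],
    )
    def test_local_model(device, model_path):
        # 2 devices x 2 paths -> 4 generated test cases, each with an
        # auto-generated id built from the parameter values.
        assert model_path.endswith((".ckpt", ".safetensors"))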