update

commit 0cfec489b7 (parent 8f942e27c4)
@@ -33,10 +33,10 @@ AVAILABLE_MODELS = [
     "paint_by_example",
     "instruct_pix2pix",
     "kandinsky2.2",
-    "sdxl"
+    "sdxl",
 ]
 SD15_MODELS = ["sd1.5", "anything4", "realisticVision1.4"]
-MODELS_SUPPORT_FREEU = SD15_MODELS + ['sd2', "sdxl"]
+MODELS_SUPPORT_FREEU = SD15_MODELS + ["sd2", "sdxl"]
 MODELS_SUPPORT_LCM_LORA = SD15_MODELS + ["sdxl"]
 
 AVAILABLE_DEVICES = ["cuda", "cpu", "mps"]
@@ -110,6 +110,14 @@ QUALITY_HELP = """
 Quality of image encoding, 0-100. Default is 95, higher quality will generate larger file size.
 """
 
+FREEU_DEFAULT_CONFIGS = {
+    "sd2": dict(s1=0.9, s2=0.2, b1=1.1, b2=1.2),
+    "sdxl": dict(s1=0.6, s2=0.4, b1=1.1, b2=1.2),
+    "sd1.5": dict(s1=0.9, s2=0.2, b1=1.2, b2=1.4),
+    "anything4": dict(s1=0.9, s2=0.2, b1=1.2, b2=1.4),
+    "realisticVision1.4": dict(s1=0.9, s2=0.2, b1=1.2, b2=1.4),
+}
+
 
 class RealESRGANModelName(str, Enum):
     realesr_general_x4v3 = "realesr-general-x4v3"
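These per-model values follow the FreeU reference configurations: the `b*` factors scale the UNet backbone features up, the `s*` factors scale the skip-connection features down. For context, diffusers ships a built-in hook for exactly these four parameters since v0.21; a minimal sketch, assuming an already-loaded SD 1.5 pipeline rather than lama-cleaner's own model manager:

```python
from diffusers import StableDiffusionPipeline

# Minimal sketch (not the commit's wiring): apply the "sd1.5" entry of
# FREEU_DEFAULT_CONFIGS through diffusers' FreeU hook (diffusers >= 0.21).
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

freeu = dict(s1=0.9, s2=0.2, b1=1.2, b2=1.4)  # FREEU_DEFAULT_CONFIGS["sd1.5"]
pipe.enable_freeu(**freeu)  # b* boost backbone features, s* damp skip features
# ... run inference ...
pipe.disable_freeu()  # restore vanilla UNet behavior
```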
@@ -12,8 +12,9 @@ from lama_cleaner.model.utils import torch_gc, get_scheduler
 from lama_cleaner.schema import Config
 
 
-class CPUTextEncoderWrapper:
+class CPUTextEncoderWrapper(torch.nn.Module):
     def __init__(self, text_encoder, torch_dtype):
+        super().__init__()
         self.config = text_encoder.config
         self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
         self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True)
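This file and `sd.py` below receive the same change: `CPUTextEncoderWrapper` now subclasses `torch.nn.Module`, and `super().__init__()` must run before any submodule is assigned, otherwise PyTorch raises `AttributeError: cannot assign module before Module.__init__() call`. A stripped-down sketch of the pattern, with a hypothetical class name:

```python
import torch

class TextEncoderOnCPU(torch.nn.Module):  # hypothetical stand-in for the wrapper
    def __init__(self, text_encoder: torch.nn.Module):
        super().__init__()  # must precede the submodule assignment below
        # Keep the encoder on CPU in float32, as the wrapper in the diff does.
        self.text_encoder = text_encoder.to(torch.device("cpu")).to(torch.float32)

    def forward(self, input_ids: torch.Tensor, **kwargs):
        return self.text_encoder(input_ids.to("cpu"), **kwargs)
```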
@@ -285,6 +285,28 @@ class StableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetPipeline
         masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
         return mask, masked_image_latents
 
+    def _default_height_width(self, height, width, image):
+        if isinstance(image, list):
+            image = image[0]
+
+        if height is None:
+            if isinstance(image, PIL.Image.Image):
+                height = image.height
+            elif isinstance(image, torch.Tensor):
+                height = image.shape[3]
+
+            height = (height // 8) * 8  # round down to nearest multiple of 8
+
+        if width is None:
+            if isinstance(image, PIL.Image.Image):
+                width = image.width
+            elif isinstance(image, torch.Tensor):
+                width = image.shape[2]
+
+            width = (width // 8) * 8  # round down to nearest multiple of 8
+
+        return height, width
+
     @torch.no_grad()
     @replace_example_docstring(EXAMPLE_DOC_STRING)
     def __call__(
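The helper rounds both dimensions down to multiples of 8 because the VAE downsamples by a factor of 8, so latents only exist at 8-pixel granularity. (Note the tensor branch reads height from `shape[3]` and width from `shape[2]`, the reverse of the usual NCHW layout; the PIL branch, the common path here, is unaffected.) A quick worked example of the rounding:

```python
# Worked example of the (dim // 8) * 8 rounding used above:
for dim in (1000, 755, 512):
    print(dim, "->", (dim // 8) * 8, "latent cells:", dim // 8)
# 1000 -> 1000 latent cells: 125
# 755  -> 752  latent cells: 94
# 512  -> 512  latent cells: 64
```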
@@ -402,14 +424,11 @@ class StableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetPipeline
 
         # 1. Check inputs. Raise error if not correct
         self.check_inputs(
-            prompt,
-            control_image,
-            height,
-            width,
-            callback_steps,
-            negative_prompt,
-            prompt_embeds,
-            negative_prompt_embeds,
+            prompt=prompt,
+            image=control_image,
+            callback_steps=callback_steps,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
         )
 
         # 2. Define call parameters
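Passing `check_inputs` keyword arguments decouples this call from the parent pipeline's positional order, which has shifted between diffusers releases (`height`, `width`, and `negative_prompt` are no longer passed at all). A hypothetical pair of signatures showing the failure mode that positional calls invite:

```python
# Hypothetical signatures, not the real diffusers API:
def check_inputs_old(prompt, image, height, width, callback_steps): ...
def check_inputs_new(prompt, image, callback_steps,
                     prompt_embeds=None, negative_prompt_embeds=None): ...

# A positional call written against the old order misbinds silently:
#   check_inputs_new("a cat", img, 512, 512, 1)  # 512 lands in callback_steps
# The keyword form binds correctly or fails loudly with a TypeError:
#   check_inputs_new(prompt="a cat", image=img, callback_steps=1)
```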
@@ -7,12 +7,13 @@ import torch
 from loguru import logger
 
 from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.model.utils import torch_gc, get_scheduler
-from lama_cleaner.schema import Config, SDSampler
+from lama_cleaner.model.utils import torch_gc
+from lama_cleaner.schema import Config
 
 
-class CPUTextEncoderWrapper:
+class CPUTextEncoderWrapper(torch.nn.Module):
     def __init__(self, text_encoder, torch_dtype):
+        super().__init__()
         self.config = text_encoder.config
         self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
         self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True)
@@ -18,7 +18,12 @@ import torch
 from PIL import Image
 from loguru import logger
 
-from lama_cleaner.const import SD15_MODELS
+from lama_cleaner.const import (
+    SD15_MODELS,
+    FREEU_DEFAULT_CONFIGS,
+    MODELS_SUPPORT_FREEU,
+    MODELS_SUPPORT_LCM_LORA,
+)
 from lama_cleaner.file_manager import FileManager
 from lama_cleaner.model.utils import torch_gc
 from lama_cleaner.model_manager import ModelManager
@@ -421,6 +426,9 @@ def get_server_config():
         "isEnableAutoSaving": is_enable_auto_saving,
         "enableFileManager": is_enable_file_manager,
         "plugins": list(plugins.keys()),
+        "freeSupportedModels": MODELS_SUPPORT_FREEU,
+        "freeuDefaultConfigs": FREEU_DEFAULT_CONFIGS,
+        "lcmLoraSupportedModels": MODELS_SUPPORT_LCM_LORA,
     }, 200
 
 
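The three new keys let the web UI show the FreeU and LCM-LoRA toggles only for models that support them. On the diffusers side, enabling LCM LoRA amounts to loading the distilled LoRA weights and swapping in `LCMScheduler`; a minimal sketch assuming an SD 1.5 pipeline, with model IDs as published on the Hugging Face Hub:

```python
from diffusers import LCMScheduler, StableDiffusionPipeline

# Minimal sketch (not lama-cleaner's actual wiring): LCM LoRA on SD 1.5.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")

# LCM LoRA trades quality headroom for speed: ~4-8 steps at low guidance.
image = pipe("a photo of a cat", num_inference_steps=4, guidance_scale=1.0).images[0]
```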
@@ -10,13 +10,13 @@ from lama_cleaner.schema import HDStrategy
 current_dir = Path(__file__).parent.absolute().resolve()
 save_dir = current_dir / 'result'
 save_dir.mkdir(exist_ok=True, parents=True)
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
+device = 'cuda' if torch.cuda.is_available() else 'mps'
 
 
 @pytest.mark.parametrize("disable_nsfw", [True, False])
 @pytest.mark.parametrize("cpu_offload", [False, True])
 def test_instruct_pix2pix(disable_nsfw, cpu_offload):
-    sd_steps = 50 if device == 'cuda' else 1
+    sd_steps = 50 if device == 'cuda' else 20
     model = ModelManager(name="instruct_pix2pix",
                          device=torch.device(device),
                          hf_access_token="",

@@ -41,7 +41,7 @@ def test_instruct_pix2pix(disable_nsfw, cpu_offload):
 @pytest.mark.parametrize("disable_nsfw", [False])
 @pytest.mark.parametrize("cpu_offload", [False])
 def test_instruct_pix2pix_snow(disable_nsfw, cpu_offload):
-    sd_steps = 50 if device == 'cuda' else 1
+    sd_steps = 50 if device == 'cuda' else 20
     model = ModelManager(name="instruct_pix2pix",
                          device=torch.device(device),
                          hf_access_token="",
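Falling back to 'mps' instead of 'cpu' assumes the non-CUDA test host is an Apple Silicon machine; on a plain CPU runner, `torch.device('mps')` would fail at use time. A hedged alternative that probes each backend (hypothetical helper, not part of the commit):

```python
import torch

def pick_device() -> str:
    # Probe backends in preference order instead of assuming Apple Silicon.
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"

device = pick_device()
```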
@@ -13,11 +13,9 @@ from lama_cleaner.tests.test_model import get_config, assert_equal
 current_dir = Path(__file__).parent.absolute().resolve()
 save_dir = current_dir / "result"
 save_dir.mkdir(exist_ok=True, parents=True)
-device = "cuda" if torch.cuda.is_available() else "cpu"
-device = torch.device(device)
 
 
-@pytest.mark.parametrize("sd_device", ["cuda"])
+@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
 @pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
 @pytest.mark.parametrize("sampler", [SDSampler.ddim])
 @pytest.mark.parametrize("cpu_textencoder", [True, False])

@@ -56,7 +54,7 @@ def test_runway_sd_1_5_ddim(
     )
 
 
-@pytest.mark.parametrize("sd_device", ["cuda"])
+@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
 @pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
 @pytest.mark.parametrize(
     "sampler", [SDSampler.pndm, SDSampler.k_lms, SDSampler.k_euler, SDSampler.k_euler_a]

@@ -95,7 +93,7 @@ def test_runway_sd_1_5(sd_device, strategy, sampler, cpu_textencoder, disable_ns
     )
 
 
-@pytest.mark.parametrize("sd_device", ["mps"])
+@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
 @pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
 @pytest.mark.parametrize("sampler", [SDSampler.ddim])
 @pytest.mark.parametrize("sd_prevent_unmasked_area", [False, True])

@@ -140,7 +138,7 @@ def test_runway_sd_1_5_negative_prompt(
     )
 
 
-@pytest.mark.parametrize("sd_device", ["cuda"])
+@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
 @pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
 @pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
 @pytest.mark.parametrize("cpu_textencoder", [False])

@@ -151,7 +149,7 @@ def test_runway_sd_1_5_sd_scale(
     if sd_device == "cuda" and not torch.cuda.is_available():
         return
 
-    sd_steps = 50 if sd_device == "cuda" else 1
+    sd_steps = 50 if sd_device == "cuda" else 20
     model = ModelManager(
         name="sd1.5",
         device=torch.device(sd_device),

@@ -177,7 +175,7 @@ def test_runway_sd_1_5_sd_scale(
     )
 
 
-@pytest.mark.parametrize("sd_device", ["mps"])
+@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
 @pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
 @pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
 def test_runway_sd_sd_strength(sd_device, strategy, sampler):

@@ -214,7 +212,7 @@ def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
     if sd_device == "cuda" and not torch.cuda.is_available():
         return
 
-    sd_steps = 50 if sd_device == "cuda" else 1
+    sd_steps = 50 if sd_device == "cuda" else 20
     model = ModelManager(
         name="sd1.5",
         device=torch.device(sd_device),

@@ -246,7 +244,7 @@ def test_local_file_path(sd_device, sampler):
     if sd_device == "cuda" and not torch.cuda.is_available():
         return
 
-    sd_steps = 1 if sd_device == "cpu" else 50
+    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
         name="sd1.5",
         device=torch.device(sd_device),
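The test changes follow one pattern: parametrize `sd_device` over both "cuda" and "mps", and raise the non-CUDA step counts (1 to 20, or 30 for the local-file test) so the fallback device produces meaningful output. The existing guard `if sd_device == "cuda" and not torch.cuda.is_available(): return` makes a missing backend pass silently; a hypothetical variant (not in the commit) would surface it as a skip:

```python
import pytest
import torch

def require_backend(sd_device: str) -> None:
    # Report a skip instead of returning early and counting the test as passed.
    if sd_device == "cuda" and not torch.cuda.is_available():
        pytest.skip("CUDA backend not available")
    if sd_device == "mps" and not torch.backends.mps.is_available():
        pytest.skip("MPS backend not available")
```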