This commit is contained in:
Qing 2023-12-28 10:48:52 +08:00
parent f0b852725f
commit 9cc9bd7a88
19 changed files with 345 additions and 482 deletions

1
.gitignore vendored
View File

@@ -8,3 +8,4 @@ build
dist/
lama_cleaner.egg-info/
venv/
tmp/

View File

@@ -72,7 +72,7 @@ def start(
enable_anime_seg: bool = Option(False, help=ANIMESEG_HELP),
enable_realesrgan: bool = Option(False),
realesrgan_device: Device = Option(Device.cpu),
realesrgan_model: str = Option(RealESRGANModel.realesr_general_x4v3),
realesrgan_model: RealESRGANModel = Option(RealESRGANModel.realesr_general_x4v3),
enable_gfpgan: bool = Option(False),
gfpgan_device: Device = Option(Device.cpu),
enable_restoreformer: bool = Option(False),

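Note on the change above: annotating the realesrgan_model option with the RealESRGANModel enum instead of str lets Typer validate the value against the enum's members and list the choices in --help. A minimal, self-contained sketch of that pattern, using a hypothetical two-member enum as a stand-in for the real RealESRGANModel in lama_cleaner.const:

from enum import Enum

import typer

class RealESRGANModel(str, Enum):
    # Hypothetical stand-in; the real enum lives in lama_cleaner.const.
    realesr_general_x4v3 = "realesr-general-x4v3"
    RealESRGAN_x4plus = "RealESRGAN_x4plus"

app = typer.Typer()

@app.command()
def start(
    realesrgan_model: RealESRGANModel = typer.Option(RealESRGANModel.realesr_general_x4v3),
):
    # Typer rejects values outside the enum and shows the valid choices in --help.
    print(f"Using RealESRGAN model: {realesrgan_model.value}")

if __name__ == "__main__":
    app()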
View File

@@ -37,7 +37,7 @@ class ModelManager:
def init_model(self, name: str, device, **kwargs):
logger.info(f"Loading model: {name}")
if name not in self.available_models:
raise NotImplementedError(f"Unsupported model: {name}")
raise NotImplementedError(f"Unsupported model: {name}. Available models: {self.available_models.keys()}")
model_info = self.available_models[name]
kwargs = {

View File

@@ -6,7 +6,7 @@ from .realesrgan import RealESRGANUpscaler
from .gfpgan_plugin import GFPGANPlugin
from .restoreformer import RestoreFormerPlugin
from .anime_seg import AnimeSeg
from ..const import InteractiveSegModel, Device
from ..const import InteractiveSegModel, Device, RealESRGANModel
def build_plugins(
@@ -18,7 +18,7 @@ def build_plugins(
enable_anime_seg: bool,
enable_realesrgan: bool,
realesrgan_device: Device,
realesrgan_model: str,
realesrgan_model: RealESRGANModel,
enable_gfpgan: bool,
gfpgan_device: Device,
enable_restoreformer: bool,

View File

@@ -507,7 +507,7 @@ def start(
enable_anime_seg: bool,
enable_realesrgan: bool,
realesrgan_device: Device,
realesrgan_model: str,
realesrgan_model: RealESRGANModel,
enable_gfpgan: bool,
gfpgan_device: Device,
enable_restoreformer: bool,
@@ -525,6 +525,7 @@ def start(
output_dir, "lama_cleaner_thumbnails"
)
file_manager.output_dir = output_dir
global_config.file_manager = file_manager
else:
global_config.input_image_path = input

View File

@@ -1,6 +1,7 @@
import os
from lama_cleaner.const import SD_CONTROLNET_CHOICES
from lama_cleaner.tests.utils import current_dir, check_device, get_config, assert_equal
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
@@ -10,178 +11,107 @@ import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
model_name = "runwayml/stable-diffusion-inpainting"
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
@pytest.mark.parametrize("cpu_textencoder", [True])
@pytest.mark.parametrize("disable_nsfw", [True])
@pytest.mark.parametrize("sd_controlnet_method", SD_CONTROLNET_CHOICES)
def test_runway_sd_1_5(
sd_device, strategy, sampler, cpu_textencoder, disable_nsfw, sd_controlnet_method
):
if sd_device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
def convert_controlnet_method_name(name):
return name.replace("/", "--")
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("controlnet_method", [SD_CONTROLNET_CHOICES[0]])
def test_runway_sd_1_5(device, controlnet_method):
sd_steps = check_device(device)
sd_steps = 1 if sd_device == "cpu" else 30
model = ModelManager(
name=model_name,
sd_controlnet=True,
device=torch.device(sd_device),
disable_nsfw=disable_nsfw,
sd_cpu_textencoder=cpu_textencoder,
sd_controlnet_method=sd_controlnet_method,
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=True,
enable_controlnet=True,
controlnet_method=controlnet_method,
)
controlnet_conditioning_scale = {
"control_v11p_sd15_canny": 0.4,
"control_v11p_sd15_openpose": 0.4,
"control_v11p_sd15_inpaint": 1.0,
"control_v11f1p_sd15_depth": 1.0,
}[sd_controlnet_method]
cfg = get_config(
strategy,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
controlnet_conditioning_scale=controlnet_conditioning_scale,
controlnet_method=sd_controlnet_method,
enable_controlnet=True,
controlnet_conditioning_scale=0.5,
controlnet_method=controlnet_method,
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}_cpu_textencoder_disable_nsfw"
name = f"device_{device}"
assert_equal(
model,
cfg,
f"sd_controlnet_{sd_controlnet_method}_{name}.png",
f"sd_controlnet_{convert_controlnet_method_name(controlnet_method)}_{name}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.2,
fy=1.2,
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path(sd_device, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
sd_steps = 1 if sd_device == "cpu" else 30
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
def test_controlnet_switch(device):
sd_steps = check_device(device)
model = ModelManager(
name=model_name,
sd_controlnet=True,
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=True,
sd_controlnet_method="control_v11p_sd15_canny",
enable_controlnet=True,
controlnet_method="lllyasviel/control_v11p_sd15_canny",
)
cfg = get_config(
HDStrategy.ORIGINAL,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
controlnet_method="control_v11p_sd15_canny",
enable_controlnet=True,
controlnet_method="lllyasviel/control_v11f1p_sd15_depth",
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
assert_equal(
model,
cfg,
f"sd_controlnet_canny_local_model_{name}.png",
f"controlnet_switch_canny_to_depth_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path_controlnet_native_inpainting(sd_device, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize(
"local_file", ["sd-v1-5-inpainting.ckpt", "v1-5-pruned-emaonly.safetensors"]
)
def test_local_file_path(device, local_file):
sd_steps = check_device(device)
controlnet_kwargs = dict(
enable_controlnet=True,
controlnet_method=SD_CONTROLNET_CHOICES[0],
)
sd_steps = 1 if sd_device == "cpu" else 30
model = ModelManager(
name=model_name,
sd_controlnet=True,
device=torch.device(sd_device),
name=local_file,
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=True,
sd_local_model_path="/Users/cwq/data/models/v1-5-pruned-emaonly.safetensors",
sd_controlnet_method="control_v11p_sd15_inpaint",
**controlnet_kwargs,
)
cfg = get_config(
HDStrategy.ORIGINAL,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
controlnet_conditioning_scale=1.0,
sd_strength=1.0,
controlnet_method="control_v11p_sd15_inpaint",
**controlnet_kwargs,
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
name = f"device_{device}"
assert_equal(
model,
cfg,
f"sd_controlnet_local_native_{name}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_controlnet_switch(sd_device, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
sd_steps = 1 if sd_device == "cpu" else 30
model = ModelManager(
name=model_name,
sd_controlnet=True,
device=torch.device(sd_device),
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=True,
sd_controlnet_method="control_v11p_sd15_canny",
)
cfg = get_config(
HDStrategy.ORIGINAL,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
controlnet_method="control_v11p_sd15_inpaint",
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
assert_equal(
model,
cfg,
f"sd_controlnet_switch_to_inpaint_local_model_{name}.png",
f"{controlnet_kwargs['controlnet_method']}_local_model_{name}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)

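For orientation, the renamed ControlNet arguments exercised above (enable_controlnet / controlnet_method replace the old sd_controlnet / sd_controlnet_method) are passed the same way when driving ModelManager outside of pytest. A rough sketch assembled only from calls visible in this diff; the device, step count, and conditioning scale are illustrative:

import torch

from lama_cleaner.model_manager import ModelManager
from lama_cleaner.tests.utils import get_config

model = ModelManager(
    name="runwayml/stable-diffusion-inpainting",
    device=torch.device("cpu"),
    disable_nsfw=True,
    sd_cpu_textencoder=True,
    enable_controlnet=True,                                  # previously: sd_controlnet=True
    controlnet_method="lllyasviel/control_v11p_sd15_canny",  # previously: sd_controlnet_method=...
)

cfg = get_config(
    prompt="a fox sitting on a bench",
    sd_steps=1,
    enable_controlnet=True,
    controlnet_conditioning_scale=0.5,
    controlnet_method="lllyasviel/control_v11p_sd15_canny",
)

# result = model(img, mask, cfg)  # img/mask are numpy arrays, loaded as in tests/utils.get_data()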
View File

@@ -4,20 +4,17 @@ import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.tests.test_model import get_config, assert_equal
from lama_cleaner.schema import HDStrategy
from lama_cleaner.tests.utils import get_config, check_device, assert_equal, current_dir
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "mps"
model_name = "timbrooks/instruct-pix2pix"
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("disable_nsfw", [True, False])
@pytest.mark.parametrize("cpu_offload", [False, True])
def test_instruct_pix2pix(disable_nsfw, cpu_offload):
sd_steps = 50 if device == "cuda" else 20
def test_instruct_pix2pix(device, disable_nsfw, cpu_offload):
sd_steps = check_device(device)
model = ModelManager(
name=model_name,
device=torch.device(device),
@@ -42,31 +39,3 @@ def test_instruct_pix2pix(disable_nsfw, cpu_offload):
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.3,
)
@pytest.mark.parametrize("disable_nsfw", [False])
@pytest.mark.parametrize("cpu_offload", [False])
def test_instruct_pix2pix_snow(disable_nsfw, cpu_offload):
sd_steps = 50 if device == "cuda" else 20
model = ModelManager(
name=model_name,
device=torch.device(device),
disable_nsfw=disable_nsfw,
sd_cpu_textencoder=False,
cpu_offload=cpu_offload,
)
cfg = get_config(
strategy=HDStrategy.ORIGINAL,
prompt="What if it were snowing?",
p2p_steps=sd_steps,
)
name = f"snow"
assert_equal(
model,
cfg,
f"instruct_pix2pix_{name}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)

View File

@@ -1,8 +1,6 @@
from pathlib import Path
from lama_cleaner.helper import load_img
from lama_cleaner.tests.utils import current_dir
current_dir = Path(__file__).parent.absolute().resolve()
png_img_p = current_dir / "image.png"
jpg_img_p = current_dir / "bunny.jpeg"

View File

@@ -1,100 +1,42 @@
from pathlib import Path
import cv2
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config, HDStrategy, LDMSampler, SDSampler
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
def get_data(
fx: float = 1,
fy: float = 1.0,
img_p=current_dir / "image.png",
mask_p=current_dir / "mask.png",
):
img = cv2.imread(str(img_p))
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
mask = cv2.resize(mask, None, fx=fx, fy=fy, interpolation=cv2.INTER_NEAREST)
return img, mask
def get_config(strategy, **kwargs):
data = dict(
ldm_steps=1,
ldm_sampler=LDMSampler.plms,
hd_strategy=strategy,
hd_strategy_crop_margin=32,
hd_strategy_crop_trigger_size=200,
hd_strategy_resize_limit=200,
)
data.update(**kwargs)
return Config(**data)
def assert_equal(
model,
config,
gt_name,
fx: float = 1,
fy: float = 1,
img_p=current_dir / "image.png",
mask_p=current_dir / "mask.png",
):
img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
print(f"Input image shape: {img.shape}")
res = model(img, mask, config)
cv2.imwrite(
str(save_dir / gt_name),
res,
[int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0],
)
"""
Note that JPEG is lossy compression, so even if it is the highest quality 100,
when the saved images is reloaded, a difference occurs with the original pixel value.
If you want to save the original images as it is, save it as PNG or BMP.
"""
# gt = cv2.imread(str(current_dir / gt_name), cv2.IMREAD_UNCHANGED)
# assert np.array_equal(res, gt)
from lama_cleaner.schema import HDStrategy, LDMSampler
from lama_cleaner.tests.utils import assert_equal, get_config, current_dir, check_device
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize(
"strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
def test_lama(strategy):
def test_lama(device, strategy):
check_device(device)
model = ModelManager(name="lama", device=device)
assert_equal(
model,
get_config(strategy),
get_config(strategy=strategy),
f"lama_{strategy[0].upper() + strategy[1:]}_result.png",
)
fx = 1.3
assert_equal(
model,
get_config(strategy),
get_config(strategy=strategy),
f"lama_{strategy[0].upper() + strategy[1:]}_fx_{fx}_result.png",
fx=1.3,
)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
@pytest.mark.parametrize(
"strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("ldm_sampler", [LDMSampler.ddim, LDMSampler.plms])
def test_ldm(strategy, ldm_sampler):
def test_ldm(device, strategy, ldm_sampler):
check_device(device)
model = ModelManager(name="ldm", device=device)
cfg = get_config(strategy, ldm_sampler=ldm_sampler)
cfg = get_config(strategy=strategy, ldm_sampler=ldm_sampler)
assert_equal(
model, cfg, f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_result.png"
)
@@ -108,15 +50,15 @@ def test_ldm(strategy, ldm_sampler):
)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
@pytest.mark.parametrize(
"strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("zits_wireframe", [False, True])
def test_zits(strategy, zits_wireframe):
def test_zits(device, strategy, zits_wireframe):
check_device(device)
model = ModelManager(name="zits", device=device)
cfg = get_config(strategy, zits_wireframe=zits_wireframe)
# os.environ['ZITS_DEBUG_LINE_PATH'] = str(current_dir / 'zits_debug_line.jpg')
# os.environ['ZITS_DEBUG_EDGE_PATH'] = str(current_dir / 'zits_debug_edge.jpg')
cfg = get_config(strategy=strategy, zits_wireframe=zits_wireframe)
assert_equal(
model,
cfg,
@@ -132,27 +74,29 @@ def test_zits(strategy, zits_wireframe):
)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("no_half", [True, False])
def test_mat(strategy, no_half):
def test_mat(device, strategy, no_half):
check_device(device)
model = ModelManager(name="mat", device=device, no_half=no_half)
cfg = get_config(strategy)
cfg = get_config(strategy=strategy)
for _ in range(10):
assert_equal(
model,
cfg,
f"mat_{strategy.capitalize()}_result.png",
)
assert_equal(
model,
cfg,
f"mat_{strategy.capitalize()}_result.png",
)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_fcf(strategy):
def test_fcf(device, strategy):
check_device(device)
model = ModelManager(name="fcf", device=device)
cfg = get_config(strategy)
cfg = get_config(strategy=strategy)
assert_equal(model, cfg, f"fcf_{strategy.capitalize()}_result.png", fx=2, fy=2)
assert_equal(model, cfg, f"fcf_{strategy.capitalize()}_result.png", fx=3.8, fy=2)
@@ -164,9 +108,9 @@ def test_fcf(strategy):
def test_cv2(strategy, cv2_flag, cv2_radius):
model = ModelManager(
name="cv2",
device=torch.device(device),
device=torch.device("cpu"),
)
cfg = get_config(strategy, cv2_flag=cv2_flag, cv2_radius=cv2_radius)
cfg = get_config(strategy=strategy, cv2_flag=cv2_flag, cv2_radius=cv2_radius)
assert_equal(
model,
cfg,
@@ -176,15 +120,17 @@ def test_cv2(strategy, cv2_flag, cv2_radius):
)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
@pytest.mark.parametrize(
"strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
def test_manga(strategy):
def test_manga(device, strategy):
check_device(device)
model = ModelManager(
name="manga",
device=torch.device(device),
)
cfg = get_config(strategy)
cfg = get_config(strategy=strategy)
assert_equal(
model,
cfg,
@@ -194,17 +140,21 @@ def test_manga(strategy):
)
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_mi_gan(strategy):
def test_mi_gan(device, strategy):
check_device(device)
model = ModelManager(
name="migan",
device=torch.device(device),
)
cfg = get_config(strategy)
cfg = get_config(strategy=strategy)
assert_equal(
model,
cfg,
f"migan_{strategy.capitalize()}.png",
f"migan_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.5,
fy=1.7
)

View File

@@ -2,16 +2,9 @@ def test_load_model():
from lama_cleaner.plugins import InteractiveSeg
from lama_cleaner.model_manager import ModelManager
interactive_seg_model = InteractiveSeg('vit_l', 'cpu')
interactive_seg_model = InteractiveSeg("vit_l", "cpu")
models = [
"lama",
"ldm",
"zits",
"mat",
"fcf",
"manga",
]
models = ["lama", "ldm", "zits", "mat", "fcf", "manga", "migan"]
for m in models:
ModelManager(
name=m,
@@ -21,25 +14,3 @@ def test_load_model():
sd_cpu_textencoder=True,
cpu_offload=True,
)
# def create_empty_file(tmp_dir, name):
# tmp_model_dir = os.path.join(tmp_dir, "torch", "hub", "checkpoints")
# Path(tmp_model_dir).mkdir(exist_ok=True, parents=True)
# path = os.path.join(tmp_model_dir, name)
# with open(path, "w") as f:
# f.write("1")
#
#
# def test_load_model_error():
# MODELS = [
# ("big-lama.pt", "e3aa4aaa15225a33ec84f9f4bc47e500"),
# ("cond_stage_model_encode.pt", "23239fc9081956a3e70de56472b3f296"),
# ("cond_stage_model_decode.pt", "fe419cd15a750d37a4733589d0d3585c"),
# ("diffusion.pt", "b0afda12bf790c03aba2a7431f11d22d"),
# ]
# with tempfile.TemporaryDirectory() as tmp_dir:
# os.environ["XDG_CACHE_HOME"] = tmp_dir
# for name, md5 in MODELS:
# create_empty_file(tmp_dir, name)
# test_load_model()

View File

@@ -1,4 +1,3 @@
import logging
import os
from lama_cleaner.schema import Config
@@ -13,8 +12,8 @@ from lama_cleaner.model_manager import ModelManager
def test_model_switch():
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
sd_controlnet=True,
sd_controlnet_method="lllyasviel/control_v11p_sd15_canny",
enable_controlnet=True,
controlnet_method="lllyasviel/control_v11p_sd15_canny",
device=torch.device("mps"),
disable_nsfw=True,
sd_cpu_textencoder=True,
@@ -29,8 +28,8 @@ def test_controlnet_switch_onoff(caplog):
name = "runwayml/stable-diffusion-inpainting"
model = ModelManager(
name=name,
sd_controlnet=True,
sd_controlnet_method="lllyasviel/control_v11p_sd15_canny",
enable_controlnet=True,
controlnet_method="lllyasviel/control_v11p_sd15_canny",
device=torch.device("mps"),
disable_nsfw=True,
sd_cpu_textencoder=True,
@@ -41,21 +40,21 @@ def test_controlnet_switch_onoff(caplog):
model.switch_controlnet_method(
Config(
name=name,
controlnet_enabled=False,
enable_controlnet=False,
)
)
assert "Disable controlnet" in caplog.text
def test_controlnet_switch_method(caplog):
def test_switch_controlnet_method(caplog):
name = "runwayml/stable-diffusion-inpainting"
old_method = "lllyasviel/control_v11p_sd15_canny"
new_method = "lllyasviel/control_v11p_sd15_openpose"
model = ModelManager(
name=name,
sd_controlnet=True,
sd_controlnet_method=old_method,
enable_controlnet=True,
controlnet_method=old_method,
device=torch.device("mps"),
disable_nsfw=True,
sd_cpu_textencoder=True,
@@ -66,7 +65,7 @@ def test_controlnet_switch_method(caplog):
model.switch_controlnet_method(
Config(
name=name,
controlnet_enabled=True,
enable_controlnet=True,
controlnet_method=new_method,
)
)

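The switch tests above also show the runtime API: an existing ModelManager can re-target or disable ControlNet through switch_controlnet_method with a Config carrying the new flags. A condensed sketch of that flow (CPU device chosen here for illustration; the tests themselves run on mps):

import torch

from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config

name = "runwayml/stable-diffusion-inpainting"
model = ModelManager(
    name=name,
    enable_controlnet=True,
    controlnet_method="lllyasviel/control_v11p_sd15_canny",
    device=torch.device("cpu"),
    disable_nsfw=True,
    sd_cpu_textencoder=True,
)

# Re-target to a different ControlNet method...
model.switch_controlnet_method(
    Config(
        name=name,
        enable_controlnet=True,
        controlnet_method="lllyasviel/control_v11p_sd15_openpose",
    )
)

# ...or turn it off entirely (the test asserts that "Disable controlnet" is logged).
model.switch_controlnet_method(Config(name=name, enable_controlnet=False))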
View File

@@ -1,5 +1,7 @@
import os
from lama_cleaner.tests.utils import current_dir, check_device
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
@@ -10,15 +12,9 @@ from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
@pytest.mark.parametrize("name", ["runwayml/stable-diffusion-inpainting"])
@pytest.mark.parametrize("sd_device", ["mps"])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize(
"rect",
[
@@ -31,24 +27,22 @@ device = torch.device(device)
[-100, -100, 512 + 200, 512 + 200],
],
)
def test_outpainting(name, sd_device, rect):
def test_outpainting(name, device, rect):
sd_steps = check_device(device)
def callback(i, t, latents):
pass
if sd_device == "cuda" and not torch.cuda.is_available():
return
model = ModelManager(
name=name,
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
callback=callback,
)
cfg = get_config(
HDStrategy.ORIGINAL,
prompt="a dog sitting on a bench in the park",
sd_steps=50,
sd_steps=sd_steps,
use_extender=True,
extender_x=rect[0],
extender_y=rect[1],
@@ -61,39 +55,37 @@ def test_outpainting(name, sd_device, rect):
assert_equal(
model,
cfg,
f"{name.replace('/', '--')}_outpainting_dpm++_{'_'.join(map(str, rect))}.png",
f"{name.replace('/', '--')}_outpainting_{'_'.join(map(str, rect))}_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("name", ["kandinsky-community/kandinsky-2-2-decoder-inpaint"])
@pytest.mark.parametrize("sd_device", ["mps"])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize(
"rect",
[
[-128, -128, 768, 768],
],
)
def test_kandinsky_outpainting(name, sd_device, rect):
def test_kandinsky_outpainting(name, device, rect):
sd_steps = check_device(device)
def callback(i, t, latents):
pass
if sd_device == "cuda" and not torch.cuda.is_available():
return
model = ModelManager(
name=name,
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
callback=callback,
)
cfg = get_config(
HDStrategy.ORIGINAL,
prompt="a cat",
negative_prompt="lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature",
sd_steps=50,
sd_steps=sd_steps,
use_extender=True,
extender_x=rect[0],
extender_y=rect[1],
@@ -106,9 +98,52 @@ def test_kandinsky_outpainting(name, sd_device, rect):
assert_equal(
model,
cfg,
f"{name.replace('/', '--')}_outpainting_dpm++_{'_'.join(map(str, rect))}.png",
f"{name.replace('/', '--')}_outpainting_{'_'.join(map(str, rect))}_device_{device}.png",
img_p=current_dir / "cat.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1,
fy=1,
)
@pytest.mark.parametrize("name", ["Sanster/PowerPaint-V1-stable-diffusion-inpainting"])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize(
"rect",
[
[-100, -100, 512 + 200, 512 + 200],
],
)
def test_powerpaint_outpainting(name, device, rect):
sd_steps = check_device(device)
def callback(i, t, latents):
pass
model = ModelManager(
name=name,
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
callback=callback,
)
cfg = get_config(
prompt="a dog sitting on a bench in the park",
sd_steps=sd_steps,
use_extender=True,
extender_x=rect[0],
extender_y=rect[1],
extender_width=rect[2],
extender_height=rect[3],
sd_guidance_scale=8.0,
sd_sampler=SDSampler.dpm_plus_plus,
powerpaint_task="outpainting",
)
assert_equal(
model,
cfg,
f"{name.replace('/', '--')}_outpainting_{'_'.join(map(str, rect))}_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)

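A brief note on the rect parameters used by the outpainting tests above: my reading, inferred from the test values only, is that rect = [x, y, width, height] describes the extended output canvas in the source image's coordinate frame, so negative x/y grow the canvas to the left and top. A small illustrative calculation under that assumption:

# Assumption: the original 512x512 image occupies (0, 0)-(512, 512) in the same frame.
src_w, src_h = 512, 512
x, y, w, h = -100, -100, 512 + 200, 512 + 200    # rect from test_powerpaint_outpainting

pad_left, pad_top = -x, -y                       # 100 px added on the left and the top
pad_right = (x + w) - src_w                      # 100 px added on the right
pad_bottom = (y + h) - src_h                     # 100 px added on the bottom
print(pad_left, pad_top, pad_right, pad_bottom)  # -> 100 100 100 100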
View File

@@ -1,26 +1,24 @@
from pathlib import Path
import cv2
import pytest
import torch
from PIL import Image
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy
from lama_cleaner.tests.test_model import get_config, get_data
from lama_cleaner.tests.utils import (
current_dir,
get_config,
get_data,
save_dir,
check_device,
)
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "mps"
device = torch.device(device)
model_name = "Fantasy-Studio/Paint-by-Example"
def assert_equal(
model,
config,
gt_name,
save_name: str,
fx: float = 1,
fy: float = 1,
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
@@ -38,46 +36,18 @@ def assert_equal(
print(f"Input image shape: {img.shape}, example_image: {example_image.shape}")
config.paint_by_example_example_image = Image.fromarray(example_image)
res = model(img, mask, config)
cv2.imwrite(str(save_dir / gt_name), res)
cv2.imwrite(str(save_dir / save_name), res)
def test_paint_by_example():
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
def test_paint_by_example(device):
sd_steps = check_device(device)
model = ModelManager(name=model_name, device=device, disable_nsfw=True)
cfg = get_config(HDStrategy.ORIGINAL, sd_steps=30)
cfg = get_config(strategy=HDStrategy.ORIGINAL, sd_steps=sd_steps)
assert_equal(
model,
cfg,
f"paint_by_example.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fy=0.9,
fx=1.3,
)
def test_paint_by_example_cpu_offload():
model = ModelManager(
name=model_name, device=device, cpu_offload=True, disable_nsfw=False
)
cfg = get_config(HDStrategy.ORIGINAL, sd_steps=30)
assert_equal(
model,
cfg,
f"paint_by_example_cpu_offload.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
def test_paint_by_example_cpu_offload_cpu_device():
model = ModelManager(
name=model_name, device=torch.device("cpu"), cpu_offload=True, disable_nsfw=True
)
cfg = get_config(HDStrategy.ORIGINAL, sd_steps=1)
assert_equal(
model,
cfg,
f"paint_by_example_cpu_offload_cpu_device.png",
f"paint_by_example_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fy=0.9,

View File

@@ -3,13 +3,12 @@ import os
import time
from lama_cleaner.plugins.anime_seg import AnimeSeg
from lama_cleaner.tests.utils import check_device, current_dir, save_dir
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import cv2
import pytest
import torch.cuda
from lama_cleaner.plugins import (
RemoveBG,
@@ -19,9 +18,6 @@ from lama_cleaner.plugins import (
InteractiveSeg,
)
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
img_p = current_dir / "bunny.jpeg"
img_bytes = open(img_p, "rb").read()
bgr_img = cv2.imread(str(img_p))
@@ -50,11 +46,7 @@ def test_anime_seg():
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_upscale(device):
if device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
check_device(device)
model = RealESRGANUpscaler("realesr-general-x4v3", device)
res = model.forward(bgr_img, 2)
_save(res, f"test_upscale_x2_{device}.png")
@@ -65,10 +57,7 @@ def test_upscale(device):
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_gfpgan(device):
if device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
check_device(device)
model = GFPGANPlugin(device)
res = model(rgb_img, None, None)
_save(res, f"test_gfpgan_{device}.png")
@@ -76,10 +65,7 @@ def test_gfpgan(device):
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_restoreformer(device):
if device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
check_device(device)
model = RestoreFormerPlugin(device)
res = model(rgb_img, None, None)
_save(res, f"test_restoreformer_{device}.png")
@@ -87,10 +73,7 @@ def test_restoreformer(device):
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_segment_anything(device):
if device == "cuda" and not torch.cuda.is_available():
return
if device == "mps" and not torch.backends.mps.is_available():
return
check_device(device)
img_md5 = hashlib.md5(img_bytes).hexdigest()
model = InteractiveSeg("vit_l", device)
new_mask = model.forward(rgb_img, [[448 // 2, 394 // 2, 1]], img_md5)

View File

@@ -1,5 +1,7 @@
import os
from lama_cleaner.tests.utils import check_device, get_config, assert_equal
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
@@ -8,14 +10,13 @@ import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("device", ["cuda", "mps"])
@pytest.mark.parametrize(
"sampler",
[
@@ -28,25 +29,24 @@ save_dir.mkdir(exist_ok=True, parents=True)
],
)
def test_runway_sd_1_5_all_samplers(
sd_device,
device,
sampler,
):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
sd_steps = check_device(device)
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
HDStrategy.ORIGINAL, prompt="a fox sitting on a bench", sd_steps=sd_steps
strategy=HDStrategy.ORIGINAL,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
name = f"device_{device}_{sampler}"
assert_equal(
model,
@@ -57,22 +57,20 @@ def test_runway_sd_1_5_all_samplers(
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("sampler", [SDSampler.lcm])
def test_runway_sd_lcm_lora(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
def test_runway_sd_lcm_lora(device, sampler):
check_device(device)
sd_steps = 5
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
strategy,
strategy=HDStrategy.ORIGINAL,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
sd_guidance_scale=2,
@@ -83,28 +81,24 @@ def test_runway_sd_lcm_lora(sd_device, strategy, sampler):
assert_equal(
model,
cfg,
f"runway_sd_1_5_lcm_lora.png",
f"runway_sd_1_5_lcm_lora_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_freeu(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
def test_runway_sd_freeu(device, sampler):
sd_steps = check_device(device)
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
strategy,
strategy=HDStrategy.ORIGINAL,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
sd_guidance_scale=7.5,
@@ -116,85 +110,83 @@ def test_runway_sd_freeu(sd_device, strategy, sampler):
assert_equal(
model,
cfg,
f"runway_sd_1_5_freeu.png",
f"runway_sd_1_5_freeu_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_sd_strength(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
def test_runway_sd_sd_strength(device, strategy, sampler):
sd_steps = check_device(device)
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_strength=0.8
strategy=strategy,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
sd_strength=0.8,
)
cfg.sd_sampler = sampler
assert_equal(
model,
cfg,
f"runway_sd_strength_0.8.png",
f"runway_sd_strength_0.8_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_norm_sd_model(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
def test_runway_norm_sd_model(device, strategy, sampler):
sd_steps = check_device(device)
model = ModelManager(
name="runwayml/stable-diffusion-v1-5",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(strategy, prompt="face of a fox, sitting on a bench", sd_steps=sd_steps)
cfg = get_config(
strategy=strategy, prompt="face of a fox, sitting on a bench", sd_steps=sd_steps
)
cfg.sd_sampler = sampler
assert_equal(
model,
cfg,
f"runway_{sd_device}_norm_sd_model.png",
f"runway_{device}_norm_sd_model_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
def test_runway_sd_1_5_cpu_offload(device, strategy, sampler):
sd_steps = check_device(device)
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=True,
)
cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
cfg = get_config(
strategy=strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
name = f"device_{device}_{sampler}"
assert_equal(
model,
@@ -205,7 +197,7 @@ def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize(
"name",
@@ -215,26 +207,23 @@ def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
"v1-5-pruned-emaonly.safetensors",
],
)
def test_local_file_path(sd_device, sampler, name):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
def test_local_file_path(device, sampler, name):
sd_steps = check_device(device)
model = ModelManager(
name=name,
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=False,
)
cfg = get_config(
HDStrategy.ORIGINAL,
strategy=HDStrategy.ORIGINAL,
prompt="a fox sitting on a bench",
sd_steps=sd_steps,
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}_{name}"
name = f"device_{device}_{sampler}_{name}"
assert_equal(
model,

View File

@@ -1,7 +1,8 @@
import os
from lama_cleaner.tests.utils import check_device, current_dir
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import pytest
import torch
@@ -10,31 +11,25 @@ from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_sdxl(sd_device, strategy, sampler):
def test_sdxl(device, strategy, sampler):
sd_steps = check_device(device)
def callback(i, t, latents):
pass
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 20
model = ModelManager(
name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
callback=callback,
)
cfg = get_config(
strategy,
strategy=strategy,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
sd_strength=1.0,
@@ -42,12 +37,10 @@ def test_sdxl(sd_device, strategy, sampler):
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
assert_equal(
model,
cfg,
f"sdxl_{name}.png",
f"sdxl_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=2,
@@ -55,26 +48,24 @@ def test_sdxl(sd_device, strategy, sampler):
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_sdxl_lcm_lora_and_freeu(sd_device, strategy, sampler):
def test_sdxl_lcm_lora_and_freeu(device, strategy, sampler):
sd_steps = check_device(device)
def callback(i, t, latents):
pass
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 5
model = ModelManager(
name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
callback=callback,
)
cfg = get_config(
strategy,
strategy=strategy,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
sd_strength=1.0,
@@ -83,7 +74,7 @@ def test_sdxl_lcm_lora_and_freeu(sd_device, strategy, sampler):
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
name = f"device_{device}_{sampler}"
assert_equal(
model,
@@ -96,7 +87,7 @@ def test_sdxl_lcm_lora_and_freeu(sd_device, strategy, sampler):
)
cfg = get_config(
strategy,
strategy=strategy,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
sd_guidance_scale=7.5,
@@ -107,7 +98,7 @@ def test_sdxl_lcm_lora_and_freeu(sd_device, strategy, sampler):
assert_equal(
model,
cfg,
f"sdxl_{name}_freeu.png",
f"sdxl_{name}_freeu_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=2,
@@ -115,28 +106,27 @@ def test_sdxl_lcm_lora_and_freeu(sd_device, strategy, sampler):
)
@pytest.mark.parametrize("sd_device", ["mps"])
@pytest.mark.parametrize("device", ["cuda", "mps"])
@pytest.mark.parametrize(
"rect",
[
[-128, -128, 1024, 1024],
],
)
def test_sdxl_outpainting(sd_device, rect):
if sd_device == "cuda" and not torch.cuda.is_available():
return
def test_sdxl_outpainting(device, rect):
sd_steps = check_device(device)
model = ModelManager(
name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
device=torch.device(sd_device),
device=torch.device(device),
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
HDStrategy.ORIGINAL,
strategy=HDStrategy.ORIGINAL,
prompt="a dog sitting on a bench in the park",
sd_steps=20,
sd_steps=sd_steps,
use_extender=True,
extender_x=rect[0],
extender_y=rect[1],
@@ -150,7 +140,7 @@ def test_sdxl_outpainting(sd_device, rect):
assert_equal(
model,
cfg,
f"sdxl_outpainting_dog_ddim_{'_'.join(map(str, rect))}.png",
f"sdxl_outpainting_dog_ddim_{'_'.join(map(str, rect))}_device_{device}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.5,

View File

@@ -0,0 +1,75 @@
from pathlib import Path
import cv2
import pytest
import torch
from lama_cleaner.schema import LDMSampler, HDStrategy, Config, SDSampler
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
def check_device(device: str) -> int:
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("CUDA is not available, skip test on cuda")
if device == "mps" and not torch.backends.mps.is_available():
pytest.skip("mps is not available, skip test on mps")
steps = 1 if device == "cpu" else 20
return steps
def assert_equal(
model,
config,
gt_name,
fx: float = 1,
fy: float = 1,
img_p=current_dir / "image.png",
mask_p=current_dir / "mask.png",
):
img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
print(f"Input image shape: {img.shape}")
res = model(img, mask, config)
ok = cv2.imwrite(
str(save_dir / gt_name),
res,
[int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0],
)
assert ok, save_dir / gt_name
"""
Note that JPEG is lossy compression, so even if it is the highest quality 100,
when the saved images is reloaded, a difference occurs with the original pixel value.
If you want to save the original images as it is, save it as PNG or BMP.
"""
# gt = cv2.imread(str(current_dir / gt_name), cv2.IMREAD_UNCHANGED)
# assert np.array_equal(res, gt)
def get_data(
fx: float = 1,
fy: float = 1.0,
img_p=current_dir / "image.png",
mask_p=current_dir / "mask.png",
):
img = cv2.imread(str(img_p))
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
mask = cv2.resize(mask, None, fx=fx, fy=fy, interpolation=cv2.INTER_NEAREST)
return img, mask
def get_config(**kwargs):
data = dict(
sd_sampler=kwargs.get("sd_sampler", SDSampler.uni_pc),
ldm_steps=1,
ldm_sampler=LDMSampler.plms,
hd_strategy=kwargs.get("strategy", HDStrategy.ORIGINAL),
hd_strategy_crop_margin=32,
hd_strategy_crop_trigger_size=200,
hd_strategy_resize_limit=200,
)
data.update(**kwargs)
return Config(**data)

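The new tests/utils module above centralizes the per-file scaffolding (result directory, device check, config and assertion helpers) that the individual test files previously duplicated. The model tests elsewhere in this commit consume it roughly like this; a condensed sketch rather than a copy of any single test:

import pytest
import torch

from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy
from lama_cleaner.tests.utils import assert_equal, check_device, current_dir, get_config

@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
def test_example(device):
    # Skips when cuda/mps is unavailable and returns a step count (1 on cpu, 20 otherwise).
    sd_steps = check_device(device)
    model = ModelManager(name="lama", device=torch.device(device))
    cfg = get_config(strategy=HDStrategy.ORIGINAL, sd_steps=sd_steps)
    assert_equal(
        model,
        cfg,
        f"example_device_{device}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )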
View File

@@ -18,7 +18,7 @@ const SheetOverlay = React.forwardRef<
>(({ className, ...props }, ref) => (
<SheetPrimitive.Overlay
className={cn(
"fixed inset-0 z-50 bg-background/80 backdrop-blur-sm data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
"fixed inset-0 z-50 bg-background/80 backdrop-blur-sm data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
className
)}
{...props}
@@ -28,7 +28,7 @@ const SheetOverlay = React.forwardRef<
SheetOverlay.displayName = SheetPrimitive.Overlay.displayName
const sheetVariants = cva(
"fixed z-50 gap-4 bg-background p-6 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:duration-200 data-[state=open]:duration-300",
"fixed z-50 gap-4 bg-background p-6 shadow-lg transition ease-in-out data-[state=closed]:duration-200 data-[state=open]:duration-300",
{
variants: {
side: {

View File

@@ -109,6 +109,7 @@ type ServerConfig = {
enableAutoSaving: boolean
enableControlnet: boolean
controlnetMethod: string
disableModelSwitch: boolean
isDesktop: boolean
}
@@ -279,6 +280,7 @@ const defaultValues: AppState = {
enableAutoSaving: false,
enableControlnet: false,
controlnetMethod: "lllyasviel/control_v11p_sd15_canny",
disableModelSwitch: false,
isDesktop: false,
},
settings: {