70af4845af

new file: inpaint/__main__.py
new file: inpaint/api.py
new file: inpaint/batch_processing.py
new file: inpaint/benchmark.py
new file: inpaint/cli.py
new file: inpaint/const.py
new file: inpaint/download.py
new file: inpaint/file_manager/__init__.py
new file: inpaint/file_manager/file_manager.py
new file: inpaint/file_manager/storage_backends.py
new file: inpaint/file_manager/utils.py
new file: inpaint/helper.py
new file: inpaint/installer.py
new file: inpaint/model/__init__.py
new file: inpaint/model/anytext/__init__.py
new file: inpaint/model/anytext/anytext_model.py
new file: inpaint/model/anytext/anytext_pipeline.py
new file: inpaint/model/anytext/anytext_sd15.yaml
new file: inpaint/model/anytext/cldm/__init__.py
new file: inpaint/model/anytext/cldm/cldm.py
new file: inpaint/model/anytext/cldm/ddim_hacked.py
new file: inpaint/model/anytext/cldm/embedding_manager.py
new file: inpaint/model/anytext/cldm/hack.py
new file: inpaint/model/anytext/cldm/model.py
new file: inpaint/model/anytext/cldm/recognizer.py
new file: inpaint/model/anytext/ldm/__init__.py
new file: inpaint/model/anytext/ldm/models/__init__.py
new file: inpaint/model/anytext/ldm/models/autoencoder.py
new file: inpaint/model/anytext/ldm/models/diffusion/__init__.py
new file: inpaint/model/anytext/ldm/models/diffusion/ddim.py
new file: inpaint/model/anytext/ldm/models/diffusion/ddpm.py
new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py
new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py
new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py
new file: inpaint/model/anytext/ldm/models/diffusion/plms.py
new file: inpaint/model/anytext/ldm/models/diffusion/sampling_util.py
new file: inpaint/model/anytext/ldm/modules/__init__.py
new file: inpaint/model/anytext/ldm/modules/attention.py
new file: inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py
new file: inpaint/model/anytext/ldm/modules/diffusionmodules/model.py
new file: inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py
new file: inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py
new file: inpaint/model/anytext/ldm/modules/diffusionmodules/util.py
new file: inpaint/model/anytext/ldm/modules/distributions/__init__.py
new file: inpaint/model/anytext/ldm/modules/distributions/distributions.py
new file: inpaint/model/anytext/ldm/modules/ema.py
new file: inpaint/model/anytext/ldm/modules/encoders/__init__.py
new file: inpaint/model/anytext/ldm/modules/encoders/modules.py
new file: inpaint/model/anytext/ldm/util.py
new file: inpaint/model/anytext/main.py
new file: inpaint/model/anytext/ocr_recog/RNN.py
new file: inpaint/model/anytext/ocr_recog/RecCTCHead.py
new file: inpaint/model/anytext/ocr_recog/RecModel.py
new file: inpaint/model/anytext/ocr_recog/RecMv1_enhance.py
new file: inpaint/model/anytext/ocr_recog/RecSVTR.py
new file: inpaint/model/anytext/ocr_recog/__init__.py
new file: inpaint/model/anytext/ocr_recog/common.py
new file: inpaint/model/anytext/ocr_recog/en_dict.txt
new file: inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt
new file: inpaint/model/anytext/utils.py
new file: inpaint/model/base.py
new file: inpaint/model/brushnet/__init__.py
new file: inpaint/model/brushnet/brushnet.py
new file: inpaint/model/brushnet/brushnet_unet_forward.py
new file: inpaint/model/brushnet/brushnet_wrapper.py
new file: inpaint/model/brushnet/pipeline_brushnet.py
new file: inpaint/model/brushnet/unet_2d_blocks.py
new file: inpaint/model/controlnet.py
new file: inpaint/model/ddim_sampler.py
new file: inpaint/model/fcf.py
new file: inpaint/model/helper/__init__.py
new file: inpaint/model/helper/controlnet_preprocess.py
new file: inpaint/model/helper/cpu_text_encoder.py
new file: inpaint/model/helper/g_diffuser_bot.py
new file: inpaint/model/instruct_pix2pix.py
new file: inpaint/model/kandinsky.py
new file: inpaint/model/lama.py
new file: inpaint/model/ldm.py
new file: inpaint/model/manga.py
new file: inpaint/model/mat.py
new file: inpaint/model/mi_gan.py
new file: inpaint/model/opencv2.py
new file: inpaint/model/original_sd_configs/__init__.py
new file: inpaint/model/original_sd_configs/sd_xl_base.yaml
new file: inpaint/model/original_sd_configs/sd_xl_refiner.yaml
new file: inpaint/model/original_sd_configs/v1-inference.yaml
new file: inpaint/model/original_sd_configs/v2-inference-v.yaml
new file: inpaint/model/paint_by_example.py
new file: inpaint/model/plms_sampler.py
new file: inpaint/model/power_paint/__init__.py
new file: inpaint/model/power_paint/pipeline_powerpaint.py
new file: inpaint/model/power_paint/power_paint.py
new file: inpaint/model/power_paint/power_paint_v2.py
new file: inpaint/model/power_paint/powerpaint_tokenizer.py
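"""Tests for Stable Diffusion inpainting through ModelManager: sampler
coverage, LCM-LoRA, partial denoising strength, CPU text encoder, CPU
offload, and loading local single-file checkpoints."""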
import os

from loguru import logger

from iopaint.tests.utils import check_device, get_config, assert_equal
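# Allow ops that lack an MPS implementation to fall back to CPU; the flag
# is set before importing torch so that it takes effect.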
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

from pathlib import Path

import pytest
import torch

from iopaint.model_manager import ModelManager
from iopaint.schema import HDStrategy, SDSampler

current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)

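# check_device() presumably skips the test when the requested device is not
# available and returns a step count suited to that device.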
@pytest.mark.parametrize("device", ["cuda", "mps"])
|
|
def test_runway_sd_1_5_all_samplers(device):
|
|
sd_steps = check_device(device)
|
|
model = ModelManager(
|
|
name="runwayml/stable-diffusion-inpainting",
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=False,
|
|
)
|
|
|
|
all_samplers = [member.value for member in SDSampler.__members__.values()]
|
|
print(all_samplers)
|
|
for sampler in all_samplers:
|
|
print(f"Testing sampler {sampler}")
|
|
if (
|
|
sampler
|
|
in [SDSampler.dpm2_karras, SDSampler.dpm2_a_karras, SDSampler.lms_karras]
|
|
and device == "mps"
|
|
):
|
|
# diffusers 0.25.0 still has bug on these sampler on mps, wait main branch released to fix it
|
|
logger.warning(
|
|
"skip dpm2_karras on mps, diffusers does not support it on mps. TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead."
|
|
)
|
|
continue
|
|
cfg = get_config(
|
|
strategy=HDStrategy.ORIGINAL,
|
|
prompt="a fox sitting on a bench",
|
|
sd_steps=sd_steps,
|
|
sd_sampler=sampler,
|
|
)
|
|
|
|
name = f"device_{device}_{sampler}"
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"runway_sd_{name}.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
)
|
|
|
|
|
|
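# LCM-LoRA is meant for few-step, low-guidance sampling, hence the fixed
# 5 steps and guidance scale of 2 below.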
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
|
|
@pytest.mark.parametrize("sampler", [SDSampler.lcm])
|
|
def test_runway_sd_lcm_lora(device, sampler):
|
|
check_device(device)
|
|
|
|
sd_steps = 5
|
|
model = ModelManager(
|
|
name="runwayml/stable-diffusion-inpainting",
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=False,
|
|
)
|
|
cfg = get_config(
|
|
strategy=HDStrategy.ORIGINAL,
|
|
prompt="face of a fox, sitting on a bench",
|
|
sd_steps=sd_steps,
|
|
sd_guidance_scale=2,
|
|
sd_lcm_lora=True,
|
|
)
|
|
cfg.sd_sampler = sampler
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"runway_sd_1_5_lcm_lora_device_{device}.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
)
|
|
|
|
|
|
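# sd_strength=0.8 only partially re-noises the masked area, so roughly 80%
# of the denoising schedule runs instead of starting from pure noise.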
@pytest.mark.parametrize("device", ["cuda", "mps"])
|
|
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
|
|
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
|
|
def test_runway_sd_sd_strength(device, strategy, sampler):
|
|
sd_steps = check_device(device)
|
|
model = ModelManager(
|
|
name="runwayml/stable-diffusion-inpainting",
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=False,
|
|
)
|
|
cfg = get_config(
|
|
strategy=strategy,
|
|
prompt="a fox sitting on a bench",
|
|
sd_steps=sd_steps,
|
|
sd_strength=0.8,
|
|
)
|
|
cfg.sd_sampler = sampler
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"runway_sd_strength_0.8_device_{device}.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
)
|
|
|
|
|
|
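# sd_cpu_textencoder=True keeps the CLIP text encoder on the CPU, trading a
# little speed for lower GPU memory use.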
@pytest.mark.parametrize("device", ["cuda", "cpu"])
|
|
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
|
|
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
|
|
def test_runway_sd_cpu_textencoder(device, strategy, sampler):
|
|
sd_steps = check_device(device)
|
|
model = ModelManager(
|
|
name="runwayml/stable-diffusion-inpainting",
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=True,
|
|
)
|
|
cfg = get_config(
|
|
strategy=strategy,
|
|
prompt="a fox sitting on a bench",
|
|
sd_steps=sd_steps,
|
|
sd_sampler=sampler,
|
|
)
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"runway_sd_device_{device}_cpu_textencoder.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
)
|
|
|
|
|
|
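# "norm" here means the regular runwayml/stable-diffusion-v1-5 text-to-image
# checkpoint rather than the dedicated inpainting model.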
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
|
|
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
|
|
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
|
|
def test_runway_norm_sd_model(device, strategy, sampler):
|
|
sd_steps = check_device(device)
|
|
model = ModelManager(
|
|
name="runwayml/stable-diffusion-v1-5",
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=False,
|
|
)
|
|
cfg = get_config(
|
|
strategy=strategy, prompt="face of a fox, sitting on a bench", sd_steps=sd_steps
|
|
)
|
|
cfg.sd_sampler = sampler
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"runway_{device}_norm_sd_model_device_{device}.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
)
|
|
|
|
|
|
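# cpu_offload moves idle model weights to CPU to reduce VRAM usage, which is
# presumably why only the cuda device is exercised here.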
@pytest.mark.parametrize("device", ["cuda"])
|
|
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
|
|
@pytest.mark.parametrize("sampler", [SDSampler.dpm_plus_plus_2m])
|
|
def test_runway_sd_1_5_cpu_offload(device, strategy, sampler):
|
|
sd_steps = check_device(device)
|
|
model = ModelManager(
|
|
name="runwayml/stable-diffusion-inpainting",
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=False,
|
|
cpu_offload=True,
|
|
)
|
|
cfg = get_config(
|
|
strategy=strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps
|
|
)
|
|
cfg.sd_sampler = sampler
|
|
|
|
name = f"device_{device}_{sampler}"
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"runway_sd_{strategy.capitalize()}_{name}_cpu_offload.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
)
|
|
|
|
|
|
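# Loads single-file .safetensors checkpoints by filename; these are assumed
# to already exist in the local model directory. For SDXL variants the test
# image is scaled 1.5x via the fx/fy factors passed to assert_equal.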
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])
|
|
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
|
|
@pytest.mark.parametrize(
|
|
"name",
|
|
[
|
|
"sd-v1-5-inpainting.safetensors",
|
|
"v1-5-pruned-emaonly.safetensors",
|
|
"sd_xl_base_1.0.safetensors",
|
|
"sd_xl_base_1.0_inpainting_0.1.safetensors",
|
|
],
|
|
)
|
|
def test_local_file_path(device, sampler, name):
|
|
sd_steps = check_device(device)
|
|
model = ModelManager(
|
|
name=name,
|
|
device=torch.device(device),
|
|
disable_nsfw=True,
|
|
sd_cpu_textencoder=False,
|
|
cpu_offload=False,
|
|
)
|
|
cfg = get_config(
|
|
strategy=HDStrategy.ORIGINAL,
|
|
prompt="a fox sitting on a bench",
|
|
sd_steps=sd_steps,
|
|
)
|
|
cfg.sd_sampler = sampler
|
|
|
|
name = f"device_{device}_{sampler}_{name}"
|
|
|
|
is_sdxl = "sd_xl" in name
|
|
|
|
assert_equal(
|
|
model,
|
|
cfg,
|
|
f"sd_local_model_{name}.png",
|
|
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
|
|
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
|
|
fx=1.5 if is_sdxl else 1,
|
|
fy=1.5 if is_sdxl else 1,
|
|
)
|