IOPaint/lama_cleaner/tests/test_plugins.py

import hashlib
import os
import time
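
# Let operators that are not implemented on MPS fall back to the CPU.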
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

from pathlib import Path
import cv2
import pytest
import torch.cuda

from lama_cleaner.plugins import (
    RemoveBG,
    RealESRGANUpscaler,
    GFPGANPlugin,
    RestoreFormerPlugin,
    InteractiveSeg,
)
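
# Shared fixtures: load the bunny test image once and write every result to
# tests/result for manual inspection; the tests below only check that each
# plugin runs end to end.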
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
img_p = current_dir / "bunny.jpeg"
img_bytes = img_p.read_bytes()
bgr_img = cv2.imread(str(img_p))
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)


def _save(img, name):
    cv2.imwrite(str(save_dir / name), img)


def test_remove_bg():
    model = RemoveBG()
    res = model.forward(bgr_img)
    _save(res, "test_remove_bg.png")


@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_upscale(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return

    model = RealESRGANUpscaler("realesr-general-x4v3", device)
    res = model.forward(bgr_img, 2)
    _save(res, f"test_upscale_x2_{device}.png")

    res = model.forward(bgr_img, 4)
    _save(res, f"test_upscale_x4_{device}.png")


@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_gfpgan(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return

    model = GFPGANPlugin(device)
    res = model(rgb_img, None, None)
    _save(res, f"test_gfpgan_{device}.png")


@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_restoreformer(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return

    model = RestoreFormerPlugin(device)
    res = model(rgb_img, None, None)
    _save(res, f"test_restoreformer_{device}.png")


@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_segment_anything(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return

    img_md5 = hashlib.md5(img_bytes).hexdigest()
    model = InteractiveSeg("vit_l", device)
    new_mask = model.forward(rgb_img, [[448 // 2, 394 // 2, 1]], img_md5)
    save_name = f"test_segment_anything_{device}.png"
    _save(new_mask, save_name)

    # Run the same prediction again with the same image hash and report how
    # long a repeated request for an already-seen image takes.
    start = time.time()
    model.forward(rgb_img, [[448 // 2, 394 // 2, 1]], img_md5)
    print(f"Time for {save_name}: {time.time() - start:.2f}s")