"""Manga inpainting model for lama_cleaner.

Combines two TorchScript checkpoints: a line-art extraction model ("erika")
and a manga inpainting model conditioned on the extracted lines.
"""
import os
import random
import time

import cv2
import numpy as np
import torch
from loguru import logger

from lama_cleaner.helper import get_cache_path_by_url, load_jit_model
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.schema import Config

# Model download URLs and MD5 checksums; each value can be overridden via the
# environment variable of the same name.
MANGA_INPAINTOR_MODEL_URL = os.environ.get(
    "MANGA_INPAINTOR_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/manga/manga_inpaintor.jit",
)
MANGA_INPAINTOR_MODEL_MD5 = os.environ.get(
    "MANGA_INPAINTOR_MODEL_MD5", "7d8b269c4613b6b3768af714610da86c"
)

MANGA_LINE_MODEL_URL = os.environ.get(
    "MANGA_LINE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/manga/erika.jit",
)
MANGA_LINE_MODEL_MD5 = os.environ.get(
    "MANGA_LINE_MODEL_MD5", "0c926d5a4af8450b0d00bc5b9a095644"
)


class Manga(InpaintModel):
    name = "manga"
    pad_mod = 16

    def init_model(self, device, **kwargs):
        self.inpaintor_model = load_jit_model(
            MANGA_INPAINTOR_MODEL_URL, device, MANGA_INPAINTOR_MODEL_MD5
        )
        self.line_model = load_jit_model(
            MANGA_LINE_MODEL_URL, device, MANGA_LINE_MODEL_MD5
        )
        self.seed = 42

    @staticmethod
    def is_downloaded() -> bool:
        model_paths = [
            get_cache_path_by_url(MANGA_INPAINTOR_MODEL_URL),
            get_cache_path_by_url(MANGA_LINE_MODEL_URL),
        ]
        return all([os.path.exists(it) for it in model_paths])
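
    # Inference pipeline: the line model ("erika") extracts a line-art map
    # from the grayscale page, then the inpainting model fills the masked
    # region conditioned on the grayscale image, the line map, random noise,
    # and a constant ones map.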
    def forward(self, image, mask, config: Config):
        """
        image: [H, W, C] RGB
        mask: [H, W, 1]
        return: BGR IMAGE
        """
        # Fix all RNG seeds so the injected noise (and therefore the result)
        # is reproducible across runs.
        seed = self.seed
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

        # Line extraction works on a single grayscale channel: [1, 1, H, W].
        gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        gray_img = torch.from_numpy(
            gray_img[np.newaxis, np.newaxis, :, :].astype(np.float32)
        ).to(self.device)
        start = time.time()
        lines = self.line_model(gray_img)
        torch.cuda.empty_cache()
        lines = torch.clamp(lines, 0, 255)
        logger.info(f"erika_model time: {time.time() - start}")

        # Binarize the mask and build the auxiliary inputs expected by the
        # inpainting model, each shaped [1, 1, H, W].
        mask = torch.from_numpy(mask[np.newaxis, :, :, :]).to(self.device)
        mask = mask.permute(0, 3, 1, 2)
        mask = torch.where(mask > 0.5, 1.0, 0.0)
        noise = torch.randn_like(mask)
        ones = torch.ones_like(mask)

        # Rescale grayscale image and line map from [0, 255] to [-1, 1].
        gray_img = gray_img / 255 * 2 - 1.0
        lines = lines / 255 * 2 - 1.0

        start = time.time()
        inpainted_image = self.inpaintor_model(gray_img, lines, mask, noise, ones)
        logger.info(f"image_inpaintor_model time: {time.time() - start}")

        # Convert the [-1, 1] output back to uint8 BGR for the caller.
        cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy()
        cur_res = (cur_res * 127.5 + 127.5).astype(np.uint8)
        cur_res = cv2.cvtColor(cur_res, cv2.COLOR_GRAY2BGR)
        return cur_res
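

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the upstream module). It calls
    # Manga.forward directly, so the inputs are padded to a multiple of
    # pad_mod by hand; in normal use the InpaintModel base class handles
    # padding/cropping and Config handling. config is unused inside this
    # forward pass, so None stands in for a real Config here. The constructor
    # call assumes InpaintModel.__init__(device) invokes init_model, and the
    # input/output file names are placeholders.
    import sys

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Manga(device)

    # Read inputs: RGB image and single-channel mask (non-zero = inpaint).
    image = cv2.cvtColor(cv2.imread(sys.argv[1]), cv2.COLOR_BGR2RGB)
    mask = cv2.imread(sys.argv[2], cv2.IMREAD_GRAYSCALE)[:, :, np.newaxis]

    # Pad H and W up to the next multiple of pad_mod (16).
    pad_h = (Manga.pad_mod - image.shape[0] % Manga.pad_mod) % Manga.pad_mod
    pad_w = (Manga.pad_mod - image.shape[1] % Manga.pad_mod) % Manga.pad_mod
    image = np.pad(image, ((0, pad_h), (0, pad_w), (0, 0)), mode="reflect")
    mask = np.pad(mask, ((0, pad_h), (0, pad_w), (0, 0)), mode="constant")

    result_bgr = model.forward(image, mask, config=None)
    cv2.imwrite("manga_inpainted.png", result_bgr)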