70af4845af
new file: inpaint/__main__.py new file: inpaint/api.py new file: inpaint/batch_processing.py new file: inpaint/benchmark.py new file: inpaint/cli.py new file: inpaint/const.py new file: inpaint/download.py new file: inpaint/file_manager/__init__.py new file: inpaint/file_manager/file_manager.py new file: inpaint/file_manager/storage_backends.py new file: inpaint/file_manager/utils.py new file: inpaint/helper.py new file: inpaint/installer.py new file: inpaint/model/__init__.py new file: inpaint/model/anytext/__init__.py new file: inpaint/model/anytext/anytext_model.py new file: inpaint/model/anytext/anytext_pipeline.py new file: inpaint/model/anytext/anytext_sd15.yaml new file: inpaint/model/anytext/cldm/__init__.py new file: inpaint/model/anytext/cldm/cldm.py new file: inpaint/model/anytext/cldm/ddim_hacked.py new file: inpaint/model/anytext/cldm/embedding_manager.py new file: inpaint/model/anytext/cldm/hack.py new file: inpaint/model/anytext/cldm/model.py new file: inpaint/model/anytext/cldm/recognizer.py new file: inpaint/model/anytext/ldm/__init__.py new file: inpaint/model/anytext/ldm/models/__init__.py new file: inpaint/model/anytext/ldm/models/autoencoder.py new file: inpaint/model/anytext/ldm/models/diffusion/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/ddim.py new file: inpaint/model/anytext/ldm/models/diffusion/ddpm.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py new file: inpaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py new file: inpaint/model/anytext/ldm/models/diffusion/plms.py new file: inpaint/model/anytext/ldm/models/diffusion/sampling_util.py new file: inpaint/model/anytext/ldm/modules/__init__.py new file: inpaint/model/anytext/ldm/modules/attention.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/__init__.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/model.py new file: 
inpaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py new file: inpaint/model/anytext/ldm/modules/diffusionmodules/util.py new file: inpaint/model/anytext/ldm/modules/distributions/__init__.py new file: inpaint/model/anytext/ldm/modules/distributions/distributions.py new file: inpaint/model/anytext/ldm/modules/ema.py new file: inpaint/model/anytext/ldm/modules/encoders/__init__.py new file: inpaint/model/anytext/ldm/modules/encoders/modules.py new file: inpaint/model/anytext/ldm/util.py new file: inpaint/model/anytext/main.py new file: inpaint/model/anytext/ocr_recog/RNN.py new file: inpaint/model/anytext/ocr_recog/RecCTCHead.py new file: inpaint/model/anytext/ocr_recog/RecModel.py new file: inpaint/model/anytext/ocr_recog/RecMv1_enhance.py new file: inpaint/model/anytext/ocr_recog/RecSVTR.py new file: inpaint/model/anytext/ocr_recog/__init__.py new file: inpaint/model/anytext/ocr_recog/common.py new file: inpaint/model/anytext/ocr_recog/en_dict.txt new file: inpaint/model/anytext/ocr_recog/ppocr_keys_v1.txt new file: inpaint/model/anytext/utils.py new file: inpaint/model/base.py new file: inpaint/model/brushnet/__init__.py new file: inpaint/model/brushnet/brushnet.py new file: inpaint/model/brushnet/brushnet_unet_forward.py new file: inpaint/model/brushnet/brushnet_wrapper.py new file: inpaint/model/brushnet/pipeline_brushnet.py new file: inpaint/model/brushnet/unet_2d_blocks.py new file: inpaint/model/controlnet.py new file: inpaint/model/ddim_sampler.py new file: inpaint/model/fcf.py new file: inpaint/model/helper/__init__.py new file: inpaint/model/helper/controlnet_preprocess.py new file: inpaint/model/helper/cpu_text_encoder.py new file: inpaint/model/helper/g_diffuser_bot.py new file: inpaint/model/instruct_pix2pix.py new file: inpaint/model/kandinsky.py new file: inpaint/model/lama.py new file: inpaint/model/ldm.py new file: inpaint/model/manga.py new file: 
inpaint/model/mat.py new file: inpaint/model/mi_gan.py new file: inpaint/model/opencv2.py new file: inpaint/model/original_sd_configs/__init__.py new file: inpaint/model/original_sd_configs/sd_xl_base.yaml new file: inpaint/model/original_sd_configs/sd_xl_refiner.yaml new file: inpaint/model/original_sd_configs/v1-inference.yaml new file: inpaint/model/original_sd_configs/v2-inference-v.yaml new file: inpaint/model/paint_by_example.py new file: inpaint/model/plms_sampler.py new file: inpaint/model/power_paint/__init__.py new file: inpaint/model/power_paint/pipeline_powerpaint.py new file: inpaint/model/power_paint/power_paint.py new file: inpaint/model/power_paint/power_paint_v2.py new file: inpaint/model/power_paint/powerpaint_tokenizer.py
111 lines
3.4 KiB
Python
111 lines
3.4 KiB
Python
import os
|
|
|
|
import cv2
|
|
import torch
|
|
|
|
from iopaint.helper import (
|
|
load_jit_model,
|
|
download_model,
|
|
get_cache_path_by_url,
|
|
boxes_from_mask,
|
|
resize_max_size,
|
|
norm_img,
|
|
)
|
|
from .base import InpaintModel
|
|
from iopaint.schema import InpaintRequest
|
|
|
|
# TorchScript-traced MI-GAN checkpoint. Both the download URL and its expected
# MD5 checksum can be overridden via environment variables (e.g. to point at a
# local mirror); the defaults fetch the official traced release.
MIGAN_MODEL_URL = os.environ.get(
    "MIGAN_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/migan/migan_traced.pt",
)
MIGAN_MODEL_MD5 = os.environ.get("MIGAN_MODEL_MD5", "76eb3b1a71c400ee3290524f7a11b89c")
|
|
|
|
|
|
class MIGAN(InpaintModel):
    """MI-GAN erase model backed by a TorchScript-traced generator.

    The network operates on fixed 512x512 inputs, so the base-class padding is
    configured to pad every crop to a 512-multiple square.
    """

    name = "migan"
    min_size = 512
    pad_mod = 512
    pad_to_square = True
    is_erase_model = True

    def init_model(self, device, **kwargs):
        # Download (if needed), checksum-verify and load the traced generator.
        self.model = load_jit_model(MIGAN_MODEL_URL, device, MIGAN_MODEL_MD5).eval()

    @staticmethod
    def download():
        download_model(MIGAN_MODEL_URL, MIGAN_MODEL_MD5)

    @staticmethod
    def is_downloaded() -> bool:
        return os.path.exists(get_cache_path_by_url(MIGAN_MODEL_URL))

    @torch.no_grad()
    def __call__(self, image, mask, config: InpaintRequest):
        """Inpaint the masked region of ``image``.

        image: [H, W, C] RGB, not normalized
        mask: [H, W]
        return: BGR IMAGE
        """
        # A native 512x512 image needs no cropping — run it straight through.
        if (image.shape[0], image.shape[1]) == (512, 512):
            return self._pad_forward(image, mask, config)

        # Otherwise inpaint each masked region as its own crop, then composite.
        # NOTE(review): this mutates the caller's config object — the 128px
        # margin sticks on the request after the call.
        config.hd_strategy_crop_margin = 128

        patches = []
        for box in boxes_from_mask(mask):
            crop_img, crop_msk, crop_box = self._crop_box(image, mask, box, config)
            crop_h, crop_w = crop_img.shape[:2]

            small_img = resize_max_size(crop_img, size_limit=512)
            small_msk = resize_max_size(crop_msk, size_limit=512)
            result = self._pad_forward(small_img, small_msk, config)

            # Scale the (BGR) network output back to the crop's original size.
            result = cv2.resize(
                result,
                (crop_w, crop_h),
                interpolation=cv2.INTER_CUBIC,
            )

            # Keep the model result only where the mask is set; everywhere
            # else restore the original pixels (flipped RGB->BGR to match).
            keep_original = crop_msk < 127
            result[keep_original] = crop_img[:, :, ::-1][keep_original]

            patches.append((result, crop_box))

        # Composite every inpainted patch onto a BGR copy of the full image.
        composed = image[:, :, ::-1].copy()
        for patch, (x1, y1, x2, y2) in patches:
            composed[y1:y2, x1:x2, :] = patch

        return composed

    def forward(self, image, mask, config: InpaintRequest):
        """Run the generator on one crop; output has the same size as input.

        image: [H, W, C] RGB
        mask: [H, W], mask area == 255
        return: BGR IMAGE
        """
        # Normalize image to [-1, 1]; binarize mask at 120 then scale to [0, 1].
        img = norm_img(image) * 2 - 1
        msk = norm_img((mask > 120) * 255)

        img_t = torch.from_numpy(img).unsqueeze(0).to(self.device)
        msk_t = torch.from_numpy(msk).unsqueeze(0).to(self.device)

        # Network input is 4 channels: a mask-derived channel (0.5 - mask)
        # concatenated with the image with the hole zeroed out.
        erased = img_t * (1 - msk_t)
        net_input = torch.cat([0.5 - msk_t, erased], dim=1)

        out = self.model(net_input)
        # [-1, 1] NCHW float -> [0, 255] NHWC uint8.
        out = (out.permute(0, 2, 3, 1) * 127.5 + 127.5).round().clamp(0, 255)
        out = out.to(torch.uint8)[0].cpu().numpy()
        return cv2.cvtColor(out, cv2.COLOR_RGB2BGR)
|