add model md5 check

Qing 2023-02-26 09:19:48 +08:00
parent 64336498ba
commit ecfecac050
9 changed files with 2002 additions and 933 deletions


@@ -11,6 +11,15 @@ import torch
 from lama_cleaner.const import MPS_SUPPORT_MODELS
 from loguru import logger
 from torch.hub import download_url_to_file, get_dir
+import hashlib
+
+
+def md5sum(filename):
+    md5 = hashlib.md5()
+    with open(filename, "rb") as f:
+        for chunk in iter(lambda: f.read(128 * md5.block_size), b""):
+            md5.update(chunk)
+    return md5.hexdigest()
 
 
 def switch_mps_device(model_name, device):
@@ -33,12 +42,22 @@ def get_cache_path_by_url(url)
     return cached_file
 
 
-def download_model(url):
+def download_model(url, model_md5: str = None):
     cached_file = get_cache_path_by_url(url)
     if not os.path.exists(cached_file):
         sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
         hash_prefix = None
         download_url_to_file(url, cached_file, hash_prefix, progress=True)
+        if model_md5:
+            _md5 = md5sum(cached_file)
+            if model_md5 == _md5:
+                logger.info(f"Download model success, md5: {_md5}")
+            else:
+                logger.error(
+                    f"Download model failed, md5: {_md5}, expected: {model_md5}. Please delete model at {cached_file} and restart lama-cleaner"
+                )
+                exit(-1)
     return cached_file
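For reference, the verification added to download_model boils down to chunked hashing plus a comparison against an expected digest. A minimal standalone sketch, stdlib only; the cache path below is the usual torch.hub default and is an illustrative assumption, while the digest is the big-lama.pt default added in this commit:

import hashlib
from pathlib import Path


def verify_md5(path, expected_md5, chunk_size=64 * 1024):
    # Hash in chunks so multi-hundred-MB checkpoints never sit fully in memory.
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest() == expected_md5


# Hypothetical cache location (torch.hub default); adjust if XDG_CACHE_HOME is set.
ckpt = Path.home() / ".cache" / "torch" / "hub" / "checkpoints" / "big-lama.pt"
if ckpt.exists():
    print("md5 ok:", verify_md5(ckpt, "e3aa4aaa15225a33ec84f9f4bc47e500"))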
@@ -48,42 +67,49 @@ def ceil_modulo(x, mod)
     return (x // mod + 1) * mod
 
 
-def \
-    load_jit_model(url_or_path, device):
+def handle_error(model_path, model_md5, e):
+    _md5 = md5sum(model_path)
+    if _md5 != model_md5:
+        logger.error(
+            f"Model md5: {_md5}, expected: {model_md5}, please delete {model_path} and restart lama-cleaner."
+            f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
+        )
+    else:
+        logger.error(
+            f"Failed to load model {model_path},"
+            f"please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:\n{e}"
+        )
+    exit(-1)
+
+
+def load_jit_model(url_or_path, device, model_md5: str):
     if os.path.exists(url_or_path):
         model_path = url_or_path
     else:
-        model_path = download_model(url_or_path)
+        model_path = download_model(url_or_path, model_md5)
     logger.info(f"Loading model from: {model_path}")
     try:
         model = torch.jit.load(model_path, map_location="cpu").to(device)
     except Exception as e:
-        logger.error(
-            f"Failed to load {model_path}, please delete model and restart lama-cleaner.\n"
-            f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
-            f"If all above operations doesn't work, please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:\n{e}"
-        )
-        exit(-1)
+        handle_error(model_path, model_md5, e)
     model.eval()
     return model
 
 
-def load_model(model: torch.nn.Module, url_or_path, device):
+def load_model(model: torch.nn.Module, url_or_path, device, model_md5):
     if os.path.exists(url_or_path):
         model_path = url_or_path
     else:
-        model_path = download_model(url_or_path)
+        model_path = download_model(url_or_path, model_md5)
     try:
+        logger.info(f"Loading model from: {model_path}")
         state_dict = torch.load(model_path, map_location="cpu")
         model.load_state_dict(state_dict, strict=True)
         model.to(device)
-        logger.info(f"Load model from: {model_path}")
-    except:
-        logger.error(
-            f"Failed to load {model_path}, delete model and restart lama-cleaner"
-        )
-        exit(-1)
+    except Exception as e:
+        handle_error(model_path, model_md5, e)
     model.eval()
     return model
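Taken together, the new call path is load_jit_model / load_model → download_model(url, model_md5) → md5sum, with handle_error reporting either a digest mismatch or a load failure before exiting. A rough usage sketch, assuming this revision of lama-cleaner is installed and the release asset is reachable; the URL and digest are the defaults added later in this commit:

import torch
from lama_cleaner.helper import load_jit_model

LAMA_URL = "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt"
LAMA_MD5 = "e3aa4aaa15225a33ec84f9f4bc47e500"

# Downloads on first use, verifies the md5, and exits with a logged error
# (via handle_error) if the file is corrupted or cannot be loaded.
model = load_jit_model(LAMA_URL, torch.device("cpu"), LAMA_MD5)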


@@ -156,12 +156,13 @@ INTERACTIVE_SEG_MODEL_URL = os.environ.get(
     "INTERACTIVE_SEG_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/clickseg_pplnet/clickseg_pplnet.pt",
 )
+INTERACTIVE_SEG_MODEL_MD5 = os.environ.get("INTERACTIVE_SEG_MODEL_MD5", "8ca44b6e02bca78f62ec26a3c32376cf")
 
 
 class InteractiveSeg:
     def __init__(self, infer_size=384, open_kernel_size=3, dilate_kernel_size=3):
         device = torch.device('cpu')
-        model = load_jit_model(INTERACTIVE_SEG_MODEL_URL, device).eval()
+        model = load_jit_model(INTERACTIVE_SEG_MODEL_URL, device, INTERACTIVE_SEG_MODEL_MD5).eval()
         self.predictor = ISPredictor(model, device,
                                      infer_size=infer_size,
                                      open_kernel_size=open_kernel_size,

File diff suppressed because it is too large


@@ -16,6 +16,7 @@ LAMA_MODEL_URL = os.environ.get(
     "LAMA_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
 )
+LAMA_MODEL_MD5 = os.environ.get("LAMA_MODEL_MD5", "e3aa4aaa15225a33ec84f9f4bc47e500")
 
 
 class LaMa(InpaintModel):

@@ -23,7 +24,7 @@ class LaMa(InpaintModel):
     pad_mod = 8
 
     def init_model(self, device, **kwargs):
-        self.model = load_jit_model(LAMA_MODEL_URL, device).eval()
+        self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval()
 
     @staticmethod
     def is_downloaded() -> bool:
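Because both the URL and the expected digest are read through os.environ.get, they can be overridden before lama-cleaner imports the module, for example when the checkpoint is served from a mirror. A hedged sketch; the mirror URL is a placeholder and the md5 must match whatever file that URL actually serves:

import os

# Must be set before the model module is imported, since the defaults
# are captured at import time via os.environ.get(...).
os.environ["LAMA_MODEL_URL"] = "https://example.com/mirror/big-lama.pt"
os.environ["LAMA_MODEL_MD5"] = "e3aa4aaa15225a33ec84f9f4bc47e500"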


@@ -26,17 +26,27 @@ LDM_ENCODE_MODEL_URL = os.environ.get(
     "LDM_ENCODE_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_encode.pt",
 )
+LDM_ENCODE_MODEL_MD5 = os.environ.get(
+    "LDM_ENCODE_MODEL_MD5", "23239fc9081956a3e70de56472b3f296"
+)
 LDM_DECODE_MODEL_URL = os.environ.get(
     "LDM_DECODE_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_decode.pt",
 )
+LDM_DECODE_MODEL_MD5 = os.environ.get(
+    "LDM_DECODE_MODEL_MD5", "fe419cd15a750d37a4733589d0d3585c"
+)
 LDM_DIFFUSION_MODEL_URL = os.environ.get(
     "LDM_DIFFUSION_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_ldm/diffusion.pt",
 )
+LDM_DIFFUSION_MODEL_MD5 = os.environ.get(
+    "LDM_DIFFUSION_MODEL_MD5", "b0afda12bf790c03aba2a7431f11d22d"
+)
 
 
 class DDPM(nn.Module):
     # classic DDPM with Gaussian diffusion, in image space
@@ -234,9 +244,15 @@ class LDM(InpaintModel):
         self.device = device
 
     def init_model(self, device, **kwargs):
-        self.diffusion_model = load_jit_model(LDM_DIFFUSION_MODEL_URL, device)
-        self.cond_stage_model_decode = load_jit_model(LDM_DECODE_MODEL_URL, device)
-        self.cond_stage_model_encode = load_jit_model(LDM_ENCODE_MODEL_URL, device)
+        self.diffusion_model = load_jit_model(
+            LDM_DIFFUSION_MODEL_URL, device, LDM_DIFFUSION_MODEL_MD5
+        )
+        self.cond_stage_model_decode = load_jit_model(
+            LDM_DECODE_MODEL_URL, device, LDM_DECODE_MODEL_MD5
+        )
+        self.cond_stage_model_encode = load_jit_model(
+            LDM_ENCODE_MODEL_URL, device, LDM_ENCODE_MODEL_MD5
+        )
         if self.fp16 and "cuda" in str(device):
             self.diffusion_model = self.diffusion_model.half()
             self.cond_stage_model_decode = self.cond_stage_model_decode.half()


@@ -11,67 +11,21 @@ from lama_cleaner.helper import get_cache_path_by_url, load_jit_model
 from lama_cleaner.model.base import InpaintModel
 from lama_cleaner.schema import Config
 
-
-# def norm(np_img):
-#     return np_img / 255 * 2 - 1.0
-#
-#
-# @torch.no_grad()
-# def run():
-#     name = 'manga_1080x740.jpg'
-#     img_p = f'/Users/qing/code/github/MangaInpainting/examples/test/imgs/{name}'
-#     mask_p = f'/Users/qing/code/github/MangaInpainting/examples/test/masks/mask_{name}'
-#     erika_model = torch.jit.load('erika.jit')
-#     manga_inpaintor_model = torch.jit.load('manga_inpaintor.jit')
-#
-#     img = cv2.imread(img_p)
-#     gray_img = cv2.imread(img_p, cv2.IMREAD_GRAYSCALE)
-#     mask = cv2.imread(mask_p, cv2.IMREAD_GRAYSCALE)
-#
-#     kernel = np.ones((9, 9), dtype=np.uint8)
-#     mask = cv2.dilate(mask, kernel, 2)
-#     # cv2.imwrite("mask.jpg", mask)
-#     # cv2.imshow('dilated_mask', cv2.hconcat([mask, dilated_mask]))
-#     # cv2.waitKey(0)
-#     # exit()
-#
-#     # img = pad(img)
-#     gray_img = pad(gray_img).astype(np.float32)
-#     mask = pad(mask)
-#
-#     # pad_mod = 16
-#     import time
-#     start = time.time()
-#     y = erika_model(torch.from_numpy(gray_img[np.newaxis, np.newaxis, :, :]))
-#     y = torch.clamp(y, 0, 255)
-#     lines = y.cpu().numpy()
-#     print(f"erika_model time: {time.time() - start}")
-#
-#     cv2.imwrite('lines.png', lines[0][0])
-#
-#     start = time.time()
-#     masks = torch.from_numpy(mask[np.newaxis, np.newaxis, :, :])
-#     masks = torch.where(masks > 0.5, torch.tensor(1.0), torch.tensor(0.0))
-#     noise = torch.randn_like(masks)
-#
-#     images = torch.from_numpy(norm(gray_img)[np.newaxis, np.newaxis, :, :])
-#     lines = torch.from_numpy(norm(lines))
-#
-#     outputs = manga_inpaintor_model(images, lines, masks, noise)
-#     print(f"manga_inpaintor_model time: {time.time() - start}")
-#
-#     outputs_merged = (outputs * masks) + (images * (1 - masks))
-#     outputs_merged = outputs_merged * 127.5 + 127.5
-#     outputs_merged = outputs_merged.permute(0, 2, 3, 1)[0].detach().cpu().numpy().astype(np.uint8)
-#     cv2.imwrite(f'output_{name}', outputs_merged)
-
-
 MANGA_INPAINTOR_MODEL_URL = os.environ.get(
     "MANGA_INPAINTOR_MODEL_URL",
-    "https://github.com/Sanster/models/releases/download/manga/manga_inpaintor.jit"
+    "https://github.com/Sanster/models/releases/download/manga/manga_inpaintor.jit",
 )
+MANGA_INPAINTOR_MODEL_MD5 = os.environ.get(
+    "MANGA_INPAINTOR_MODEL_MD5", "7d8b269c4613b6b3768af714610da86c"
+)
 MANGA_LINE_MODEL_URL = os.environ.get(
     "MANGA_LINE_MODEL_URL",
-    "https://github.com/Sanster/models/releases/download/manga/erika.jit"
+    "https://github.com/Sanster/models/releases/download/manga/erika.jit",
 )
+MANGA_LINE_MODEL_MD5 = os.environ.get(
+    "MANGA_LINE_MODEL_MD5", "8f157c142718f11e233d3750a65e0794"
+)
@@ -80,8 +34,12 @@ class Manga(InpaintModel):
     pad_mod = 16
 
     def init_model(self, device, **kwargs):
-        self.inpaintor_model = load_jit_model(MANGA_INPAINTOR_MODEL_URL, device)
-        self.line_model = load_jit_model(MANGA_LINE_MODEL_URL, device)
+        self.inpaintor_model = load_jit_model(
+            MANGA_INPAINTOR_MODEL_URL, device, MANGA_INPAINTOR_MODEL_MD5
+        )
+        self.line_model = load_jit_model(
+            MANGA_LINE_MODEL_URL, device, MANGA_LINE_MODEL_MD5
+        )
         self.seed = 42
 
     @staticmethod
@@ -105,7 +63,9 @@ class Manga(InpaintModel):
         torch.cuda.manual_seed_all(seed)
 
         gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
-        gray_img = torch.from_numpy(gray_img[np.newaxis, np.newaxis, :, :].astype(np.float32)).to(self.device)
+        gray_img = torch.from_numpy(
+            gray_img[np.newaxis, np.newaxis, :, :].astype(np.float32)
+        ).to(self.device)
         start = time.time()
         lines = self.line_model(gray_img)
         torch.cuda.empty_cache()

File diff suppressed because it is too large


@@ -17,21 +17,33 @@ ZITS_INPAINT_MODEL_URL = os.environ.get(
     "ZITS_INPAINT_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_zits/zits-inpaint-0717.pt",
 )
+ZITS_INPAINT_MODEL_MD5 = os.environ.get(
+    "ZITS_INPAINT_MODEL_MD5", "9978cc7157dc29699e42308d675b2154"
+)
 ZITS_EDGE_LINE_MODEL_URL = os.environ.get(
     "ZITS_EDGE_LINE_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_zits/zits-edge-line-0717.pt",
 )
+ZITS_EDGE_LINE_MODEL_MD5 = os.environ.get(
+    "ZITS_EDGE_LINE_MODEL_MD5", "55e31af21ba96bbf0c80603c76ea8c5f"
+)
 ZITS_STRUCTURE_UPSAMPLE_MODEL_URL = os.environ.get(
     "ZITS_STRUCTURE_UPSAMPLE_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_zits/zits-structure-upsample-0717.pt",
 )
+ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 = os.environ.get(
+    "ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5", "3d88a07211bd41b2ec8cc0d999f29927"
+)
 ZITS_WIRE_FRAME_MODEL_URL = os.environ.get(
     "ZITS_WIRE_FRAME_MODEL_URL",
     "https://github.com/Sanster/models/releases/download/add_zits/zits-wireframe-0717.pt",
 )
+ZITS_WIRE_FRAME_MODEL_MD5 = os.environ.get(
+    "ZITS_WIRE_FRAME_MODEL_MD5", "a9727c63a8b48b65c905d351b21ce46b"
+)
 
 
 def resize(img, height, width, center_crop=False):
@@ -219,12 +231,12 @@ class ZITS(InpaintModel):
         self.sample_edge_line_iterations = 1
 
     def init_model(self, device, **kwargs):
-        self.wireframe = load_jit_model(ZITS_WIRE_FRAME_MODEL_URL, device)
-        self.edge_line = load_jit_model(ZITS_EDGE_LINE_MODEL_URL, device)
+        self.wireframe = load_jit_model(ZITS_WIRE_FRAME_MODEL_URL, device, ZITS_WIRE_FRAME_MODEL_MD5)
+        self.edge_line = load_jit_model(ZITS_EDGE_LINE_MODEL_URL, device, ZITS_EDGE_LINE_MODEL_MD5)
         self.structure_upsample = load_jit_model(
-            ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, device
+            ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, device, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5
         )
-        self.inpaint = load_jit_model(ZITS_INPAINT_MODEL_URL, device)
+        self.inpaint = load_jit_model(ZITS_INPAINT_MODEL_URL, device, ZITS_INPAINT_MODEL_MD5)
 
     @staticmethod
     def is_downloaded() -> bool:


@@ -0,0 +1,54 @@
+import os
+import tempfile
+from pathlib import Path
+
+
+def test_load_model():
+    from lama_cleaner.interactive_seg import InteractiveSeg
+    from lama_cleaner.model_manager import ModelManager
+
+    interactive_seg_model = InteractiveSeg()
+
+    models = [
+        "lama",
+        "ldm",
+        "zits",
+        "mat",
+        "fcf",
+        "manga",
+    ]
+    for m in models:
+        ModelManager(
+            name=m,
+            device="cpu",
+            no_half=False,
+            hf_access_token="",
+            disable_nsfw=False,
+            sd_cpu_textencoder=True,
+            sd_run_local=True,
+            local_files_only=True,
+            cpu_offload=True,
+            enable_xformers=False,
+        )
+
+
+# def create_empty_file(tmp_dir, name):
+#     tmp_model_dir = os.path.join(tmp_dir, "torch", "hub", "checkpoints")
+#     Path(tmp_model_dir).mkdir(exist_ok=True, parents=True)
+#     path = os.path.join(tmp_model_dir, name)
+#     with open(path, "w") as f:
+#         f.write("1")
+#
+#
+# def test_load_model_error():
+#     MODELS = [
+#         ("big-lama.pt", "e3aa4aaa15225a33ec84f9f4bc47e500"),
+#         ("cond_stage_model_encode.pt", "23239fc9081956a3e70de56472b3f296"),
+#         ("cond_stage_model_decode.pt", "fe419cd15a750d37a4733589d0d3585c"),
+#         ("diffusion.pt", "b0afda12bf790c03aba2a7431f11d22d"),
+#     ]
+#     with tempfile.TemporaryDirectory() as tmp_dir:
+#         os.environ["XDG_CACHE_HOME"] = tmp_dir
+#         for name, md5 in MODELS:
+#             create_empty_file(tmp_dir, name)
+#         test_load_model()