From c327e735cb5e4410be41eeeeccf8a473fbca6764 Mon Sep 17 00:00:00 2001
From: root
Date: Tue, 20 Aug 2024 21:33:21 +0200
Subject: [PATCH] iopaint changed to inpaint

---
 README.md                                       |  2 +-
 inpaint/cli.py                                  |  4 ++--
 inpaint/const.py                                |  2 +-
 inpaint/download.py                             |  4 ++--
 inpaint/helper.py                               |  8 ++++----
 inpaint/model/anytext/anytext_sd15.yaml         | 12 ++++++------
 inpaint/model/anytext/cldm/cldm.py              | 14 +++++++-------
 inpaint/model/anytext/cldm/ddim_hacked.py       |  2 +-
 inpaint/model/anytext/cldm/embedding_manager.py |  2 +-
 inpaint/model/anytext/cldm/hack.py              | 12 ++++++------
 inpaint/model/anytext/cldm/model.py             |  2 +-
 inpaint/model/anytext/cldm/recognizer.py        |  2 +-
 inpaint/model/anytext/ldm/models/autoencoder.py |  8 ++++----
 13 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/README.md b/README.md
index 69b3dc0..d264bce 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-IOPaint
+Image Sorter InPaint
 
 A free and open-source inpainting & outpainting tool powered by SOTA AI model.
 
diff --git a/inpaint/cli.py b/inpaint/cli.py
index fb8e94a..6722902 100644
--- a/inpaint/cli.py
+++ b/inpaint/cli.py
@@ -94,7 +94,7 @@ def run(
     batch_inpaint(model, device, image, mask, output, config, concat)
 
 
-@typer_app.command(help="Start IOPaint server")
+@typer_app.command(help="Start InPaint server")
 @use_json_config()
 def start(
     host: str = Option("127.0.0.1"),
@@ -222,7 +222,7 @@ def start(
     api.launch()
 
 
-@typer_app.command(help="Start IOPaint web config page")
+@typer_app.command(help="Start InPaint web config page")
 def start_web_config(
     config_file: Path = Option("config.json"),
 ):
diff --git a/inpaint/const.py b/inpaint/const.py
index b18254b..17e252b 100644
--- a/inpaint/const.py
+++ b/inpaint/const.py
@@ -125,4 +125,4 @@ GFPGAN_HELP = "Enable GFPGAN face restore. To also enhance background, use with
 RESTOREFORMER_HELP = "Enable RestoreFormer face restore. To also enhance background, use with --enable-realesrgan"
 GIF_HELP = "Enable GIF plugin. Make GIF to compare original and cleaned image"
 
-INBROWSER_HELP = "Automatically launch IOPaint in a new tab on the default browser"
+INBROWSER_HELP = "Automatically launch InPaint in a new tab on the default browser"
diff --git a/inpaint/download.py b/inpaint/download.py
index c0a099f..741e611 100644
--- a/inpaint/download.py
+++ b/inpaint/download.py
@@ -111,7 +111,7 @@ def get_sdxl_model_type(model_abs_path: str) -> Optional[ModelType]:
 def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:
     cache_dir = Path(cache_dir)
     stable_diffusion_dir = cache_dir / "stable_diffusion"
-    cache_file = stable_diffusion_dir / "iopaint_cache.json"
+    cache_file = stable_diffusion_dir / "inpaint_cache.json"
     model_type_cache = {}
     if cache_file.exists():
         try:
@@ -146,7 +146,7 @@ def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:
             json.dump(model_type_cache, fw, indent=2, ensure_ascii=False)
 
     stable_diffusion_xl_dir = cache_dir / "stable_diffusion_xl"
-    sdxl_cache_file = stable_diffusion_xl_dir / "iopaint_cache.json"
+    sdxl_cache_file = stable_diffusion_xl_dir / "inpaint_cache.json"
     sdxl_model_type_cache = {}
     if sdxl_cache_file.exists():
         try:
diff --git a/inpaint/helper.py b/inpaint/helper.py
index c2c0c48..d40ba00 100644
--- a/inpaint/helper.py
+++ b/inpaint/helper.py
@@ -59,12 +59,12 @@ def download_model(url, model_md5: str = None):
             try:
                 os.remove(cached_file)
                 logger.error(
-                    f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
+                    f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart inpaint."
                     f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
                 )
             except:
                 logger.error(
-                    f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint."
+                    f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart inpaint."
                 )
             exit(-1)
 
@@ -83,12 +83,12 @@ def handle_error(model_path, model_md5, e):
         try:
             os.remove(model_path)
             logger.error(
-                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
+                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart inpaint."
                 f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
             )
         except:
             logger.error(
-                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint."
+ f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart inpaint." ) else: logger.error( diff --git a/inpaint/model/anytext/anytext_sd15.yaml b/inpaint/model/anytext/anytext_sd15.yaml index d727594..6a7a391 100644 --- a/inpaint/model/anytext/anytext_sd15.yaml +++ b/inpaint/model/anytext/anytext_sd15.yaml @@ -1,5 +1,5 @@ model: - target: iopaint.model.anytext.cldm.cldm.ControlLDM + target: inpaint.model.anytext.cldm.cldm.ControlLDM params: linear_start: 0.00085 linear_end: 0.0120 @@ -25,7 +25,7 @@ model: with_step_weight: true use_vae_upsample: true embedding_manager_config: - target: iopaint.model.anytext.cldm.embedding_manager.EmbeddingManager + target: inpaint.model.anytext.cldm.embedding_manager.EmbeddingManager params: valid: true # v6 emb_type: ocr # ocr, vit, conv @@ -35,7 +35,7 @@ model: placeholder_string: '*' control_stage_config: - target: iopaint.model.anytext.cldm.cldm.ControlNet + target: inpaint.model.anytext.cldm.cldm.ControlNet params: image_size: 32 # unused in_channels: 4 @@ -53,7 +53,7 @@ model: legacy: False unet_config: - target: iopaint.model.anytext.cldm.cldm.ControlledUnetModel + target: inpaint.model.anytext.cldm.cldm.ControlledUnetModel params: image_size: 32 # unused in_channels: 4 @@ -70,7 +70,7 @@ model: legacy: False first_stage_config: - target: iopaint.model.anytext.ldm.models.autoencoder.AutoencoderKL + target: inpaint.model.anytext.ldm.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -93,7 +93,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedderT3 + target: inpaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedderT3 params: version: openai/clip-vit-large-patch14 use_vision: false # v6 diff --git a/inpaint/model/anytext/cldm/cldm.py b/inpaint/model/anytext/cldm/cldm.py index ad9692a..2bbe2ff 100644 --- a/inpaint/model/anytext/cldm/cldm.py +++ b/inpaint/model/anytext/cldm/cldm.py @@ -8,7 +8,7 @@ import torch.nn as nn import copy from easydict import EasyDict as edict -from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( +from inpaint.model.anytext.ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, @@ -16,12 +16,12 @@ from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( ) from einops import rearrange, repeat -from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer -from iopaint.model.anytext.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock -from iopaint.model.anytext.ldm.models.diffusion.ddpm import LatentDiffusion -from iopaint.model.anytext.ldm.util import log_txt_as_img, exists, instantiate_from_config -from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler -from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution +from inpaint.model.anytext.ldm.modules.attention import SpatialTransformer +from inpaint.model.anytext.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock +from inpaint.model.anytext.ldm.models.diffusion.ddpm import LatentDiffusion +from inpaint.model.anytext.ldm.util import log_txt_as_img, exists, instantiate_from_config +from inpaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler +from inpaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution from .recognizer 
import TextRecognizer, create_predictor CURRENT_DIR = Path(os.path.dirname(os.path.abspath(__file__))) diff --git a/inpaint/model/anytext/cldm/ddim_hacked.py b/inpaint/model/anytext/cldm/ddim_hacked.py index b23a883..8df37f5 100644 --- a/inpaint/model/anytext/cldm/ddim_hacked.py +++ b/inpaint/model/anytext/cldm/ddim_hacked.py @@ -4,7 +4,7 @@ import torch import numpy as np from tqdm import tqdm -from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( +from inpaint.model.anytext.ldm.modules.diffusionmodules.util import ( make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, diff --git a/inpaint/model/anytext/cldm/embedding_manager.py b/inpaint/model/anytext/cldm/embedding_manager.py index 6ccf8a9..0e2a7e4 100644 --- a/inpaint/model/anytext/cldm/embedding_manager.py +++ b/inpaint/model/anytext/cldm/embedding_manager.py @@ -5,7 +5,7 @@ import torch import torch.nn as nn import torch.nn.functional as F from functools import partial -from iopaint.model.anytext.ldm.modules.diffusionmodules.util import conv_nd, linear +from inpaint.model.anytext.ldm.modules.diffusionmodules.util import conv_nd, linear def get_clip_token_for_string(tokenizer, string): diff --git a/inpaint/model/anytext/cldm/hack.py b/inpaint/model/anytext/cldm/hack.py index 05afe5f..ad60ee1 100644 --- a/inpaint/model/anytext/cldm/hack.py +++ b/inpaint/model/anytext/cldm/hack.py @@ -1,11 +1,11 @@ import torch import einops -import iopaint.model.anytext.ldm.modules.encoders.modules -import iopaint.model.anytext.ldm.modules.attention +import inpaint.model.anytext.ldm.modules.encoders.modules +import inpaint.model.anytext.ldm.modules.attention from transformers import logging -from iopaint.model.anytext.ldm.modules.attention import default +from inpaint.model.anytext.ldm.modules.attention import default def disable_verbosity(): @@ -15,15 +15,15 @@ def disable_verbosity(): def enable_sliced_attention(): - iopaint.model.anytext.ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward + inpaint.model.anytext.ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward print('Enabled sliced_attention.') return def hack_everything(clip_skip=0): disable_verbosity() - iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward - iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip + inpaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward + inpaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip print('Enabled clip hacks.') return diff --git a/inpaint/model/anytext/cldm/model.py b/inpaint/model/anytext/cldm/model.py index 688f2ed..c0cf7eb 100644 --- a/inpaint/model/anytext/cldm/model.py +++ b/inpaint/model/anytext/cldm/model.py @@ -2,7 +2,7 @@ import os import torch from omegaconf import OmegaConf -from iopaint.model.anytext.ldm.util import instantiate_from_config +from inpaint.model.anytext.ldm.util import instantiate_from_config def get_state_dict(d): diff --git a/inpaint/model/anytext/cldm/recognizer.py b/inpaint/model/anytext/cldm/recognizer.py index 0621512..9838471 100755 --- a/inpaint/model/anytext/cldm/recognizer.py +++ b/inpaint/model/anytext/cldm/recognizer.py @@ -8,7 +8,7 @@ import math import traceback from easydict import EasyDict as edict import time -from iopaint.model.anytext.ocr_recog.RecModel import RecModel +from inpaint.model.anytext.ocr_recog.RecModel import RecModel import torch import 
torch.nn.functional as F diff --git a/inpaint/model/anytext/ldm/models/autoencoder.py b/inpaint/model/anytext/ldm/models/autoencoder.py index 20d52e9..58d0ad9 100644 --- a/inpaint/model/anytext/ldm/models/autoencoder.py +++ b/inpaint/model/anytext/ldm/models/autoencoder.py @@ -2,11 +2,11 @@ import torch import torch.nn.functional as F from contextlib import contextmanager -from iopaint.model.anytext.ldm.modules.diffusionmodules.model import Encoder, Decoder -from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution +from inpaint.model.anytext.ldm.modules.diffusionmodules.model import Encoder, Decoder +from inpaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution -from iopaint.model.anytext.ldm.util import instantiate_from_config -from iopaint.model.anytext.ldm.modules.ema import LitEma +from inpaint.model.anytext.ldm.util import instantiate_from_config +from inpaint.model.anytext.ldm.modules.ema import LitEma class AutoencoderKL(torch.nn.Module):