iopaint changed to inpaint

root 2024-08-20 21:33:21 +02:00
parent 309e2cb346
commit c327e735cb
13 changed files with 37 additions and 37 deletions
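
The rename touches three kinds of occurrences across the 13 files: Python import paths and module references (iopaint.model.anytext...), user-facing strings in CLI help texts and log messages (IOPaint becoming InPaint), and dotted class paths plus a cache filename in YAML configs and the model scanner. A bulk rename like this is typically scripted; below is a minimal sketch, assuming a flat textual replacement is safe for this codebase (illustrative only, not part of the commit):

    # Illustrative sketch, not part of this commit: bulk-rename the
    # "iopaint" package to "inpaint" across source, configs, and docs.
    from pathlib import Path

    REPLACEMENTS = [("iopaint", "inpaint"), ("IOPaint", "InPaint")]
    SUFFIXES = {".py", ".yaml", ".md"}

    for path in Path(".").rglob("*"):
        if not (path.is_file() and path.suffix in SUFFIXES):
            continue
        text = path.read_text(encoding="utf-8")
        updated = text
        for old, new in REPLACEMENTS:
            updated = updated.replace(old, new)
        if updated != text:
            path.write_text(updated, encoding="utf-8")
            print(f"rewrote {path}")

One-off exceptions, such as the README heading becoming "Image Sorter InPaint", would still be edited by hand.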

View File

@@ -1,4 +1,4 @@
-<h1 align="center">IOPaint</h1>
+<h1 align="center">Image Sorter InPaint</h1>
<p align="center">A free and open-source inpainting & outpainting tool powered by SOTA AI model.</p>
<p align="center">

View File

@@ -94,7 +94,7 @@ def run(
batch_inpaint(model, device, image, mask, output, config, concat)
-@typer_app.command(help="Start IOPaint server")
+@typer_app.command(help="Start InPaint server")
@use_json_config()
def start(
host: str = Option("127.0.0.1"),
@@ -222,7 +222,7 @@ def start(
api.launch()
-@typer_app.command(help="Start IOPaint web config page")
+@typer_app.command(help="Start InPaint web config page")
def start_web_config(
config_file: Path = Option("config.json"),
):
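
Both hunks in this file only retouch the help= strings on Typer commands. For orientation, here is a minimal sketch of the CLI structure they sit in, assuming standard Typer usage (bodies elided, not the real implementation):

    # Minimal sketch of the Typer CLI these hunks touch; command names
    # and help texts follow the diff, the bodies are placeholders.
    from pathlib import Path

    import typer
    from typer import Option

    typer_app = typer.Typer()

    @typer_app.command(help="Start InPaint server")
    def start(host: str = Option("127.0.0.1")):
        ...  # set up the model, then call api.launch()

    @typer_app.command(help="Start InPaint web config page")
    def start_web_config(config_file: Path = Option("config.json")):
        ...

    if __name__ == "__main__":
        typer_app()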

View File

@@ -125,4 +125,4 @@ GFPGAN_HELP = "Enable GFPGAN face restore. To also enhance background, use with
RESTOREFORMER_HELP = "Enable RestoreFormer face restore. To also enhance background, use with --enable-realesrgan"
GIF_HELP = "Enable GIF plugin. Make GIF to compare original and cleaned image"
-INBROWSER_HELP = "Automatically launch IOPaint in a new tab on the default browser"
+INBROWSER_HELP = "Automatically launch InPaint in a new tab on the default browser"

View File

@@ -111,7 +111,7 @@ def get_sdxl_model_type(model_abs_path: str) -> Optional[ModelType]:
def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:
cache_dir = Path(cache_dir)
stable_diffusion_dir = cache_dir / "stable_diffusion"
-cache_file = stable_diffusion_dir / "iopaint_cache.json"
+cache_file = stable_diffusion_dir / "inpaint_cache.json"
model_type_cache = {}
if cache_file.exists():
try:
@@ -146,7 +146,7 @@ def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:
json.dump(model_type_cache, fw, indent=2, ensure_ascii=False)
stable_diffusion_xl_dir = cache_dir / "stable_diffusion_xl"
-sdxl_cache_file = stable_diffusion_xl_dir / "iopaint_cache.json"
+sdxl_cache_file = stable_diffusion_xl_dir / "inpaint_cache.json"
sdxl_model_type_cache = {}
if sdxl_cache_file.exists():
try:
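
Note that renaming the scan cache from iopaint_cache.json to inpaint_cache.json silently orphans existing caches: on the next start every single-file checkpoint gets its model type re-detected. If that matters, a one-off migration could carry the old file over. A sketch, assuming the default cache layout shown above (hypothetical, not part of this commit):

    # Hypothetical migration helper, not part of this commit: reuse the
    # old scan cache instead of re-detecting every model type.
    from pathlib import Path

    def migrate_scan_cache(cache_dir: str) -> None:
        for sub in ("stable_diffusion", "stable_diffusion_xl"):
            old = Path(cache_dir) / sub / "iopaint_cache.json"
            new = old.with_name("inpaint_cache.json")
            if old.exists() and not new.exists():
                old.rename(new)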

View File

@@ -59,12 +59,12 @@ def download_model(url, model_md5: str = None):
try:
os.remove(cached_file)
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
+f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart inpaint."
f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
)
except:
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint."
+f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart inpaint."
)
exit(-1)
@@ -83,12 +83,12 @@ def handle_error(model_path, model_md5, e):
try:
os.remove(model_path)
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
+f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart inpaint."
f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
)
except:
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint."
+f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart inpaint."
)
else:
logger.error(

View File

@@ -1,5 +1,5 @@
model:
-target: iopaint.model.anytext.cldm.cldm.ControlLDM
+target: inpaint.model.anytext.cldm.cldm.ControlLDM
params:
linear_start: 0.00085
linear_end: 0.0120
@@ -25,7 +25,7 @@ model:
with_step_weight: true
use_vae_upsample: true
embedding_manager_config:
-target: iopaint.model.anytext.cldm.embedding_manager.EmbeddingManager
+target: inpaint.model.anytext.cldm.embedding_manager.EmbeddingManager
params:
valid: true # v6
emb_type: ocr # ocr, vit, conv
@@ -35,7 +35,7 @@ model:
placeholder_string: '*'
control_stage_config:
-target: iopaint.model.anytext.cldm.cldm.ControlNet
+target: inpaint.model.anytext.cldm.cldm.ControlNet
params:
image_size: 32 # unused
in_channels: 4
@@ -53,7 +53,7 @@ model:
legacy: False
unet_config:
-target: iopaint.model.anytext.cldm.cldm.ControlledUnetModel
+target: inpaint.model.anytext.cldm.cldm.ControlledUnetModel
params:
image_size: 32 # unused
in_channels: 4
@@ -70,7 +70,7 @@ model:
legacy: False
first_stage_config:
-target: iopaint.model.anytext.ldm.models.autoencoder.AutoencoderKL
+target: inpaint.model.anytext.ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
@@ -93,7 +93,7 @@ model:
target: torch.nn.Identity
cond_stage_config:
-target: iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedderT3
+target: inpaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedderT3
params:
version: openai/clip-vit-large-patch14
use_vision: false # v6
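
The YAML target: entries are not imports; they are dotted paths resolved at runtime by instantiate_from_config, which is why a package rename has to touch config files as well as code. A minimal sketch of that resolution pattern (an assumption-level reconstruction of the helper in inpaint.model.anytext.ldm.util, not the verbatim code):

    # Sketch of the dynamic-import pattern behind the "target:" keys;
    # a stale dotted path here fails only at instantiation time.
    import importlib

    def instantiate_from_config(config: dict):
        module_path, cls_name = config["target"].rsplit(".", 1)
        cls = getattr(importlib.import_module(module_path), cls_name)
        return cls(**config.get("params", {}))

Because the path is only a string, grep-based renames must cover YAML files too; nothing fails at import time if one is missed.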

View File

@@ -8,7 +8,7 @@ import torch.nn as nn
import copy
from easydict import EasyDict as edict
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import (
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import (
conv_nd,
linear,
zero_module,
@@ -16,12 +16,12 @@ from iopaint.model.anytext.ldm.modules.diffusionmodules.util import (
)
from einops import rearrange, repeat
-from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer
-from iopaint.model.anytext.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
-from iopaint.model.anytext.ldm.models.diffusion.ddpm import LatentDiffusion
-from iopaint.model.anytext.ldm.util import log_txt_as_img, exists, instantiate_from_config
-from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler
-from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
+from inpaint.model.anytext.ldm.modules.attention import SpatialTransformer
+from inpaint.model.anytext.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
+from inpaint.model.anytext.ldm.models.diffusion.ddpm import LatentDiffusion
+from inpaint.model.anytext.ldm.util import log_txt_as_img, exists, instantiate_from_config
+from inpaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler
+from inpaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from .recognizer import TextRecognizer, create_predictor
CURRENT_DIR = Path(os.path.dirname(os.path.abspath(__file__)))

View File

@@ -4,7 +4,7 @@ import torch
import numpy as np
from tqdm import tqdm
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import (
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import (
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,

View File

@@ -5,7 +5,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import conv_nd, linear
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import conv_nd, linear
def get_clip_token_for_string(tokenizer, string):

View File

@@ -1,11 +1,11 @@
import torch
import einops
-import iopaint.model.anytext.ldm.modules.encoders.modules
-import iopaint.model.anytext.ldm.modules.attention
+import inpaint.model.anytext.ldm.modules.encoders.modules
+import inpaint.model.anytext.ldm.modules.attention
from transformers import logging
-from iopaint.model.anytext.ldm.modules.attention import default
+from inpaint.model.anytext.ldm.modules.attention import default
def disable_verbosity():
@@ -15,15 +15,15 @@ def disable_verbosity():
def enable_sliced_attention():
-iopaint.model.anytext.ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward
+inpaint.model.anytext.ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward
print('Enabled sliced_attention.')
return
def hack_everything(clip_skip=0):
disable_verbosity()
-iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward
-iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip
+inpaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward
+inpaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip
print('Enabled clip hacks.')
return
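
This file patches methods directly on classes at module level, so the fully qualified package name appears on the left-hand side of plain assignments; beyond imports, these are the occurrences a rename has to catch. A self-contained sketch of the pattern with stand-in names (the real targets are the attention and CLIP embedder classes above):

    # Self-contained sketch of the monkey-patching pattern in this file,
    # using stand-in names instead of the real attention classes.
    class CrossAttention:
        def forward(self, x):
            return x

    def _hacked_forward(self, x):
        return x * 2  # stand-in for the sliced-attention replacement

    # Rebinding on the class retargets every instance, old and new.
    CrossAttention.forward = _hacked_forward
    assert CrossAttention().forward(3) == 6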

View File

@@ -2,7 +2,7 @@ import os
import torch
from omegaconf import OmegaConf
-from iopaint.model.anytext.ldm.util import instantiate_from_config
+from inpaint.model.anytext.ldm.util import instantiate_from_config
def get_state_dict(d):

View File

@@ -8,7 +8,7 @@ import math
import traceback
from easydict import EasyDict as edict
import time
-from iopaint.model.anytext.ocr_recog.RecModel import RecModel
+from inpaint.model.anytext.ocr_recog.RecModel import RecModel
import torch
import torch.nn.functional as F

View File

@@ -2,11 +2,11 @@ import torch
import torch.nn.functional as F
from contextlib import contextmanager
-from iopaint.model.anytext.ldm.modules.diffusionmodules.model import Encoder, Decoder
-from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
+from inpaint.model.anytext.ldm.modules.diffusionmodules.model import Encoder, Decoder
+from inpaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
-from iopaint.model.anytext.ldm.util import instantiate_from_config
-from iopaint.model.anytext.ldm.modules.ema import LitEma
+from inpaint.model.anytext.ldm.util import instantiate_from_config
+from inpaint.model.anytext.ldm.modules.ema import LitEma
class AutoencoderKL(torch.nn.Module):