rename to iopaint

Qing 2024-01-05 15:19:23 +08:00
parent f1f18aa6cd
commit a73e2a531f
101 changed files with 180 additions and 253 deletions

.gitignore (vendored, 2 changes)
View File

@@ -4,7 +4,7 @@ examples/
.idea/
.vscode/
build
-!lama_cleaner/app/build
+!iopaint/app/build
dist/
lama_cleaner.egg-info/
venv/

View File

@@ -1,75 +1 @@
-<p align="center">
-<img alt="logo" height=256 src="./assets/logo.png" />
-</p>
-<h1 align="center">Lama Cleaner</h1>
-<p align="center">A free and open-source inpainting tool powered by SOTA AI model.</p>
-<p align="center">
-<a href="https://github.com/Sanster/lama-cleaner">
-<img alt="total download" src="https://pepy.tech/badge/lama-cleaner" />
-</a>
-<a href="https://pypi.org/project/lama-cleaner/">
-<img alt="version" src="https://img.shields.io/pypi/v/lama-cleaner" />
-</a>
-<a href="https://colab.research.google.com/drive/1e3ZkAJxvkK3uzaTGu91N9TvI_Mahs0Wb?usp=sharing">
-<img alt="Open in Colab" src="https://colab.research.google.com/assets/colab-badge.svg" />
-</a>
-<a href="https://huggingface.co/spaces/Sanster/Lama-Cleaner-lama">
-<img alt="Hugging Face Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue" />
-</a>
-<a href="">
-<img alt="python version" src="https://img.shields.io/pypi/pyversions/lama-cleaner" />
-</a>
-<a href="https://hub.docker.com/r/cwq1913/lama-cleaner">
-<img alt="version" src="https://img.shields.io/docker/pulls/cwq1913/lama-cleaner" />
-</a>
-</p>
-https://user-images.githubusercontent.com/3998421/196976498-ba1ad3ab-fa18-4c55-965f-5c6683141375.mp4
-## Features
-- Completely free and open-source, fully self-hosted, support CPU & GPU & M1/2
-- [Windows 1-Click Installer](https://lama-cleaner-docs.vercel.app/install/windows_1click_installer)
-- [Native macOS app](https://opticlean.io/)
-- Multiple SOTA AI [models](https://lama-cleaner-docs.vercel.app/models)
-- Erase model: LaMa/LDM/ZITS/MAT/FcF/Manga
-- Erase and Replace model: Stable Diffusion/Paint by Example
-- [Plugins](https://lama-cleaner-docs.vercel.app/plugins) for post-processing:
-- [RemoveBG](https://github.com/danielgatis/rembg): Remove images background
-- [RealESRGAN](https://github.com/xinntao/Real-ESRGAN): Super Resolution
-- [GFPGAN](https://github.com/TencentARC/GFPGAN): Face Restoration
-- [RestoreFormer](https://github.com/wzhouxiff/RestoreFormer): Face Restoration
-- [Segment Anything](https://lama-cleaner-docs.vercel.app/plugins#interactive-segmentation): Accurate and fast interactive object segmentation
-- [FileManager](https://lama-cleaner-docs.vercel.app/features/file_manager): Browse your pictures conveniently and save them directly to the output directory.
-- More features at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/)
-## Quick Start
-Lama Cleaner make it easy to use SOTA AI model in just two commands:
-```bash
-# In order to use the GPU, install cuda version of pytorch first.
-# pip install torch==1.13.1+cu117 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu117
-pip install lama-cleaner
-lama-cleaner --model=lama --device=cpu --port=8080
-```
-That's it, Lama Cleaner is now running at http://localhost:8080
-See all command line arguments at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/install/pip)
-## Development
-Only needed if you plan to modify the frontend and recompile yourself.
-### Frontend
-Frontend code are modified from [cleanup.pictures](https://github.com/initml/cleanup.pictures), You can experience their
-great online services [here](https://cleanup.pictures/).
-- Install dependencies:`cd lama_cleaner/app/ && pnpm install`
-- Start development server: `pnpm start`
-- Build: `pnpm build`
+# IOPaint

View File

@@ -10,6 +10,6 @@ warnings.simplefilter("ignore", UserWarning)
def entry_point():
# To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers
# https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18
-from lama_cleaner.cli import typer_app
+from iopaint.cli import typer_app
typer_app()
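The comment in this hunk explains why the import is deferred: diffusers reads XDG_CACHE_HOME when it is first imported, so the variable has to be set before iopaint.cli (which pulls in diffusers) is loaded. A minimal sketch of that ordering, with a hypothetical cache path standing in for the parsed args.model_cache_dir:

```python
import os

def entry_point():
    # Hypothetical value; in iopaint this comes from args.model_cache_dir.
    os.environ["XDG_CACHE_HOME"] = "/models/cache"
    # Deferred import: only after the variable is set does importing iopaint.cli
    # (and therefore diffusers) pick up the custom cache directory.
    from iopaint.cli import typer_app
    typer_app()
```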

View File

@@ -21,8 +21,8 @@ from fastapi.staticfiles import StaticFiles
from loguru import logger
from socketio import AsyncServer
-from lama_cleaner.file_manager import FileManager
+from iopaint.file_manager import FileManager
-from lama_cleaner.helper import (
+from iopaint.helper import (
load_img,
decode_base64_to_image,
pil_to_bytes,
@@ -31,12 +31,12 @@ from lama_cleaner.helper import (
gen_frontend_mask,
adjust_mask,
)
-from lama_cleaner.model.utils import torch_gc
+from iopaint.model.utils import torch_gc
-from lama_cleaner.model_info import ModelInfo
+from iopaint.model_info import ModelInfo
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.plugins import build_plugins
+from iopaint.plugins import build_plugins
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import (
+from iopaint.schema import (
GenInfoResponse,
ApiConfig,
ServerConfigResponse,
@@ -356,7 +356,7 @@ class Api:
if __name__ == "__main__":
-from lama_cleaner.schema import InteractiveSegModel, RealESRGANModel
+from iopaint.schema import InteractiveSegModel, RealESRGANModel
app = FastAPI()
api = Api(

View File

@@ -17,9 +17,9 @@ from rich.progress import (
TimeRemainingColumn,
)
-from lama_cleaner.helper import pil_to_bytes
+from iopaint.helper import pil_to_bytes
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
def glob_images(path: Path) -> Dict[str, Path]:

View File

@@ -9,8 +9,8 @@ import nvidia_smi
import psutil
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import InpaintRequest, HDStrategy, SDSampler
+from iopaint.schema import InpaintRequest, HDStrategy, SDSampler
try:
torch._C._jit_override_can_fuse_on_cpu(False)

View File

@@ -6,16 +6,16 @@ from fastapi import FastAPI
from loguru import logger
from typer import Option
-from lama_cleaner.const import *
+from iopaint.const import *
-from lama_cleaner.download import cli_download_model, scan_models
+from iopaint.download import cli_download_model, scan_models
-from lama_cleaner.runtime import setup_model_dir, dump_environment_info, check_device
+from iopaint.runtime import setup_model_dir, dump_environment_info, check_device
typer_app = typer.Typer(pretty_exceptions_show_locals=False, add_completion=False)
@typer_app.command(help="Install all plugins dependencies")
def install_plugins_packages():
-from lama_cleaner.installer import install_plugins_package
+from iopaint.installer import install_plugins_package
install_plugins_package()
@@ -67,12 +67,12 @@ def run(
logger.info(f"{model} not found in {model_dir}, try to downloading")
cli_download_model(model, model_dir)
-from lama_cleaner.batch_processing import batch_inpaint
+from iopaint.batch_processing import batch_inpaint
batch_inpaint(model, device, image, mask, output, config, concat)
-@typer_app.command(help="Start lama cleaner server")
+@typer_app.command(help="Start IOPaint server")
def start(
host: str = Option("127.0.0.1"),
port: int = Option(8080),
@@ -136,8 +136,8 @@ def start(
logger.info(f"{model} not found in {model_dir}, try to downloading")
cli_download_model(model, model_dir)
-from lama_cleaner.api import Api
+from iopaint.api import Api
-from lama_cleaner.schema import ApiConfig
+from iopaint.schema import ApiConfig
app = FastAPI()
api = Api(

View File

@@ -6,21 +6,21 @@ from huggingface_hub.constants import HF_HUB_CACHE
from loguru import logger
from pathlib import Path
-from lama_cleaner.const import (
+from iopaint.const import (
DEFAULT_MODEL_DIR,
DIFFUSERS_SD_CLASS_NAME,
DIFFUSERS_SD_INPAINT_CLASS_NAME,
DIFFUSERS_SDXL_CLASS_NAME,
DIFFUSERS_SDXL_INPAINT_CLASS_NAME,
)
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
+from iopaint.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.model_info import ModelInfo, ModelType
+from iopaint.model_info import ModelInfo, ModelType
-from lama_cleaner.runtime import setup_model_dir
+from iopaint.runtime import setup_model_dir
def cli_download_model(model: str, model_dir: Path):
setup_model_dir(model_dir)
-from lama_cleaner.model import models
+from iopaint.model import models
if model in models and models[model].is_erase_model:
logger.info(f"Downloading {model}...")
@@ -85,7 +85,7 @@ def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:
def scan_inpaint_models(model_dir: Path) -> List[ModelInfo]:
res = []
-from lama_cleaner.model import models
+from iopaint.model import models
# logger.info(f"Scanning inpaint models in {model_dir}")

View File

@@ -10,7 +10,7 @@ import cv2
from PIL import Image, ImageOps, PngImagePlugin
import numpy as np
import torch
-from lama_cleaner.const import MPS_UNSUPPORT_MODELS
+from iopaint.const import MPS_UNSUPPORT_MODELS
from loguru import logger
from torch.hub import download_url_to_file, get_dir
import hashlib
@@ -56,12 +56,12 @@ def download_model(url, model_md5: str = None):
try:
os.remove(cached_file)
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
+f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
)
except:
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner."
+f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint."
)
exit(-1)
@@ -80,12 +80,12 @@ def handle_error(model_path, model_md5, e):
try:
os.remove(model_path)
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
+f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
)
except:
logger.error(
-f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner."
+f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint."
)
else:
logger.error(
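The log messages in this hunk come from an md5 check on downloaded model weights; only the product name in the user-facing text changes. For context, a hedged sketch of that kind of check (an assumed shape, not necessarily iopaint's exact helper):

```python
import hashlib

def compute_md5(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large model weights never need to fit in memory."""
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()

# Hypothetical usage mirroring the error path above: delete the cached file on
# mismatch so the next start can re-download it.
# if compute_md5(cached_file) != model_md5:
#     os.remove(cached_file)
```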

View File

@@ -6,15 +6,15 @@ import torch
import numpy as np
from loguru import logger
-from lama_cleaner.helper import (
+from iopaint.helper import (
boxes_from_mask,
resize_max_size,
pad_img_to_modulo,
switch_mps_device,
)
-from lama_cleaner.model.helper.g_diffuser_bot import expand_image
+from iopaint.model.helper.g_diffuser_bot import expand_image
-from lama_cleaner.model.utils import get_scheduler
+from iopaint.model.utils import get_scheduler
-from lama_cleaner.schema import InpaintRequest, HDStrategy, SDSampler
+from iopaint.schema import InpaintRequest, HDStrategy, SDSampler
class InpaintModel:

View File

@@ -5,16 +5,16 @@ import torch
from diffusers import ControlNetModel, DiffusionPipeline
from loguru import logger
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.model.helper.controlnet_preprocess import (
+from iopaint.model.helper.controlnet_preprocess import (
make_canny_control_image,
make_openpose_control_image,
make_depth_control_image,
make_inpaint_control_image,
)
-from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
+from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
-from lama_cleaner.model.utils import get_scheduler, handle_from_pretrained_exceptions
+from iopaint.model.utils import get_scheduler, handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest, ModelType
+from iopaint.schema import InpaintRequest, ModelType
class ControlNet(DiffusionInpaintModel):

View File

@@ -2,7 +2,7 @@ import torch
import numpy as np
from tqdm import tqdm
-from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
+from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from loguru import logger

View File

@@ -6,9 +6,9 @@ import torch
import numpy as np
import torch.fft as fft
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
-from lama_cleaner.helper import (
+from iopaint.helper import (
load_model,
get_cache_path_by_url,
norm_img,
@@ -16,11 +16,11 @@ from lama_cleaner.helper import (
resize_max_size,
download_model,
)
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
from torch import conv2d, nn
import torch.nn.functional as F
-from lama_cleaner.model.utils import (
+from iopaint.model.utils import (
setup_filter,
_parse_scaling,
_parse_padding,

View File

@@ -1,5 +1,5 @@
import torch
-from lama_cleaner.model.utils import torch_gc
+from iopaint.model.utils import torch_gc
class CPUTextEncoderWrapper(torch.nn.Module):

View File

@@ -3,8 +3,8 @@ import cv2
import torch
from loguru import logger
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
class InstructPix2Pix(DiffusionInpaintModel):

View File

@@ -3,9 +3,9 @@ import cv2
import numpy as np
import torch
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.model.utils import get_scheduler
+from iopaint.model.utils import get_scheduler
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
class Kandinsky(DiffusionInpaintModel):

View File

@@ -4,14 +4,14 @@ import cv2
import numpy as np
import torch
-from lama_cleaner.helper import (
+from iopaint.helper import (
norm_img,
get_cache_path_by_url,
load_jit_model,
download_model,
)
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
LAMA_MODEL_URL = os.environ.get(
"LAMA_MODEL_URL",

View File

@@ -4,20 +4,20 @@ import numpy as np
import torch
from loguru import logger
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
-from lama_cleaner.model.ddim_sampler import DDIMSampler
+from iopaint.model.ddim_sampler import DDIMSampler
-from lama_cleaner.model.plms_sampler import PLMSSampler
+from iopaint.model.plms_sampler import PLMSSampler
-from lama_cleaner.schema import InpaintRequest, LDMSampler
+from iopaint.schema import InpaintRequest, LDMSampler
torch.manual_seed(42)
import torch.nn as nn
-from lama_cleaner.helper import (
+from iopaint.helper import (
download_model,
norm_img,
get_cache_path_by_url,
load_jit_model,
)
-from lama_cleaner.model.utils import (
+from iopaint.model.utils import (
make_beta_schedule,
timestep_embedding,
)

View File

@@ -7,9 +7,9 @@ import torch
import time
from loguru import logger
-from lama_cleaner.helper import get_cache_path_by_url, load_jit_model, download_model
+from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
MANGA_INPAINTOR_MODEL_URL = os.environ.get(

View File

@@ -8,14 +8,14 @@ import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
-from lama_cleaner.helper import (
+from iopaint.helper import (
load_model,
get_cache_path_by_url,
norm_img,
download_model,
)
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
-from lama_cleaner.model.utils import (
+from iopaint.model.utils import (
setup_filter,
Conv2dLayer,
FullyConnectedLayer,
@@ -28,7 +28,7 @@ from lama_cleaner.model.utils import (
normalize_2nd_moment,
set_seed,
)
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
class ModulatedConv2d(nn.Module):

View File

@@ -3,7 +3,7 @@ import os
import cv2
import torch
-from lama_cleaner.helper import (
+from iopaint.helper import (
load_jit_model,
download_model,
get_cache_path_by_url,
@@ -11,8 +11,8 @@ from lama_cleaner.helper import (
resize_max_size,
norm_img,
)
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
MIGAN_MODEL_URL = os.environ.get(
"MIGAN_MODEL_URL",

View File

@@ -1,6 +1,6 @@
import cv2
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA}

View File

@@ -4,9 +4,9 @@ import cv2
import torch
from loguru import logger
-from lama_cleaner.helper import decode_base64_to_image
+from iopaint.helper import decode_base64_to_image
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
class PaintByExample(DiffusionInpaintModel):

View File

@@ -1,7 +1,7 @@
# From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py
import torch
import numpy as np
-from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
+from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from tqdm import tqdm

View File

@@ -4,10 +4,10 @@ import cv2
import torch
from loguru import logger
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
+from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
+from iopaint.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
from .powerpaint_tokenizer import add_task_to_prompt

View File

@@ -5,7 +5,7 @@ import random
from typing import Any, List, Optional, Union
from transformers import CLIPTokenizer
-from lama_cleaner.schema import PowerPaintTask
+from iopaint.schema import PowerPaintTask
def add_task_to_prompt(prompt, negative_prompt, task: PowerPaintTask):

View File

@@ -3,10 +3,10 @@ import cv2
import torch
from loguru import logger
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
+from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
+from iopaint.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest, ModelType
+from iopaint.schema import InpaintRequest, ModelType
class SD(DiffusionInpaintModel):

View File

@@ -6,9 +6,9 @@ import torch
from diffusers import AutoencoderKL
from loguru import logger
-from lama_cleaner.model.base import DiffusionInpaintModel
+from iopaint.model.base import DiffusionInpaintModel
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
+from iopaint.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest, ModelType
+from iopaint.schema import InpaintRequest, ModelType
class SDXL(DiffusionInpaintModel):

View File

@@ -27,7 +27,7 @@ from diffusers import (
from diffusers.configuration_utils import FrozenDict
from loguru import logger
-from lama_cleaner.schema import SDSampler
+from iopaint.schema import SDSampler
from torch import conv2d, conv_transpose2d

View File

@@ -5,11 +5,11 @@ import cv2
import torch
import torch.nn.functional as F
-from lama_cleaner.helper import get_cache_path_by_url, load_jit_model, download_model
+from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
import numpy as np
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
ZITS_INPAINT_MODEL_URL = os.environ.get(
"ZITS_INPAINT_MODEL_URL",

View File

@@ -2,13 +2,13 @@ from typing import List
from pydantic import computed_field, BaseModel
-from lama_cleaner.const import (
+from iopaint.const import (
SDXL_CONTROLNET_CHOICES,
SD2_CONTROLNET_CHOICES,
SD_CONTROLNET_CHOICES,
)
-from lama_cleaner.model import InstructPix2Pix, Kandinsky22, PowerPaint, SD2
+from iopaint.model import InstructPix2Pix, Kandinsky22, PowerPaint, SD2
-from lama_cleaner.schema import ModelType
+from iopaint.schema import ModelType
class ModelInfo(BaseModel):

View File

@@ -4,12 +4,12 @@ import torch
from loguru import logger
import numpy as np
-from lama_cleaner.download import scan_models
+from iopaint.download import scan_models
-from lama_cleaner.helper import switch_mps_device
+from iopaint.helper import switch_mps_device
-from lama_cleaner.model import models, ControlNet, SD, SDXL
+from iopaint.model import models, ControlNet, SD, SDXL
-from lama_cleaner.model.utils import torch_gc
+from iopaint.model.utils import torch_gc
-from lama_cleaner.model_info import ModelInfo, ModelType
+from iopaint.model_info import ModelInfo, ModelType
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
class ModelManager:

View File

@@ -5,9 +5,9 @@ import torch.nn.functional as F
import numpy as np
from PIL import Image
-from lama_cleaner.helper import load_model
+from iopaint.helper import load_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
class REBNCONV(nn.Module):

View File

@@ -1,7 +1,7 @@
from loguru import logger
import numpy as np
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
class BasePlugin:

View File

@@ -2,9 +2,9 @@ import cv2
import numpy as np
from loguru import logger
-from lama_cleaner.helper import download_model
+from iopaint.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
class GFPGANPlugin(BasePlugin):

View File

@@ -7,10 +7,10 @@ import numpy as np
import torch
from loguru import logger
-from lama_cleaner.helper import download_model
+from iopaint.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.plugins.segment_anything import SamPredictor, sam_model_registry
+from iopaint.plugins.segment_anything import SamPredictor, sam_model_registry
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
# 从小到大 (from smallest to largest)
SEGMENT_ANYTHING_MODELS = {

View File

@@ -5,10 +5,10 @@ import numpy as np
import torch
from loguru import logger
-from lama_cleaner.const import RealESRGANModel
+from iopaint.const import RealESRGANModel
-from lama_cleaner.helper import download_model
+from iopaint.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
class RealESRGANUpscaler(BasePlugin):

View File

@@ -3,8 +3,8 @@ import cv2
import numpy as np
from torch.hub import get_dir
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
class RemoveBG(BasePlugin):

View File

@@ -2,9 +2,9 @@ import cv2
import numpy as np
from loguru import logger
-from lama_cleaner.helper import download_model
+from iopaint.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
+from iopaint.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
class RestoreFormerPlugin(BasePlugin):

View File

@@ -8,7 +8,7 @@ import torch
from functools import partial
-from lama_cleaner.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
+from iopaint.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
from .modeling import (
ImageEncoderViT,

View File

@@ -9,7 +9,7 @@ from loguru import logger
from rich import print
from typing import Dict, Any
-from lama_cleaner.const import Device
+from iopaint.const import Device
_PY_VERSION: str = sys.version.split()[0].rstrip("+")
@@ -28,7 +28,7 @@ _CANDIDATES = [
"transformers",
"opencv-python",
"accelerate",
-"lama-cleaner",
+"iopaint",
"rembg",
"realesrgan",
"gfpgan",

View File

@@ -5,7 +5,7 @@ from typing import Optional, Literal, List
from pydantic import BaseModel, Field, field_validator
-from lama_cleaner.const import Device, InteractiveSegModel, RealESRGANModel
+from iopaint.const import Device, InteractiveSegModel, RealESRGANModel
class PluginInfo(BaseModel):

View File

Binary image: 480 KiB before, 480 KiB after

View File

Binary image: 51 KiB before, 51 KiB after

View File

Binary image: 481 KiB before, 481 KiB after

View File

Binary image: 215 KiB before, 215 KiB after

View File

Binary image: 305 KiB before, 305 KiB after

View File

Binary image: 129 KiB before, 129 KiB after

View File

Binary image: 7.7 KiB before, 7.7 KiB after

View File

Binary image: 395 KiB before, 395 KiB after

View File

Binary image: 12 KiB before, 12 KiB after

View File

Binary image: 38 KiB before, 38 KiB after

View File

Binary image: 69 KiB before, 69 KiB after

View File

@@ -1,6 +1,6 @@
import cv2
-from lama_cleaner.helper import adjust_mask
+from iopaint.helper import adjust_mask
-from lama_cleaner.tests.utils import current_dir, save_dir
+from iopaint.tests.utils import current_dir, save_dir
mask_p = current_dir / "overture-creations-5sI6fQgYIuo_mask.png"

View File

@@ -1,7 +1,7 @@
import os
-from lama_cleaner.const import SD_CONTROLNET_CHOICES
+from iopaint.const import SD_CONTROLNET_CHOICES
-from lama_cleaner.tests.utils import current_dir, check_device, get_config, assert_equal
+from iopaint.tests.utils import current_dir, check_device, get_config, assert_equal
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
@@ -9,8 +9,8 @@ from pathlib import Path
import pytest
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler
+from iopaint.schema import HDStrategy, SDSampler
model_name = "runwayml/stable-diffusion-inpainting"

View File

@@ -3,9 +3,9 @@ from pathlib import Path
import pytest
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy
+from iopaint.schema import HDStrategy
-from lama_cleaner.tests.utils import get_config, check_device, assert_equal, current_dir
+from iopaint.tests.utils import get_config, check_device, assert_equal, current_dir
model_name = "timbrooks/instruct-pix2pix"

View File

@@ -1,5 +1,5 @@
-from lama_cleaner.helper import load_img
+from iopaint.helper import load_img
-from lama_cleaner.tests.utils import current_dir
+from iopaint.tests.utils import current_dir
png_img_p = current_dir / "image.png"
jpg_img_p = current_dir / "bunny.jpeg"

View File

@@ -1,9 +1,9 @@
import pytest
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, LDMSampler
+from iopaint.schema import HDStrategy, LDMSampler
-from lama_cleaner.tests.utils import assert_equal, get_config, current_dir, check_device
+from iopaint.tests.utils import assert_equal, get_config, current_dir, check_device
@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])

View File

@@ -1,6 +1,6 @@
def test_load_model():
-from lama_cleaner.plugins import InteractiveSeg
+from iopaint.plugins import InteractiveSeg
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
interactive_seg_model = InteractiveSeg("vit_l", "cpu")

View File

@@ -1,12 +1,12 @@
import os
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
def test_model_switch():

View File

@@ -1,6 +1,6 @@
import os
-from lama_cleaner.tests.utils import current_dir, check_device
+from iopaint.tests.utils import current_dir, check_device
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
@@ -8,9 +8,9 @@ from pathlib import Path
import pytest
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler
+from iopaint.schema import HDStrategy, SDSampler
-from lama_cleaner.tests.test_model import get_config, assert_equal
+from iopaint.tests.test_model import get_config, assert_equal
@pytest.mark.parametrize("name", ["runwayml/stable-diffusion-inpainting"])

View File

@@ -2,9 +2,9 @@ import cv2
import pytest
from PIL import Image
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy
+from iopaint.schema import HDStrategy
-from lama_cleaner.tests.utils import (
+from iopaint.tests.utils import (
current_dir,
get_config,
get_data,

View File

@@ -3,17 +3,17 @@ import os
import time
from PIL import Image
-from lama_cleaner.helper import encode_pil_to_base64, gen_frontend_mask
+from iopaint.helper import encode_pil_to_base64, gen_frontend_mask
-from lama_cleaner.plugins.anime_seg import AnimeSeg
+from iopaint.plugins.anime_seg import AnimeSeg
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
-from lama_cleaner.tests.utils import check_device, current_dir, save_dir
+from iopaint.tests.utils import check_device, current_dir, save_dir
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import cv2
import pytest
-from lama_cleaner.plugins import (
+from iopaint.plugins import (
RemoveBG,
RealESRGANUpscaler,
GFPGANPlugin,

View File

@@ -5,7 +5,7 @@ from typing import List
from PIL import Image
-from lama_cleaner.helper import pil_to_bytes, load_img
+from iopaint.helper import pil_to_bytes, load_img
current_dir = Path(__file__).parent.absolute().resolve()

View File

@@ -2,7 +2,7 @@ import os
from loguru import logger
-from lama_cleaner.tests.utils import check_device, get_config, assert_equal
+from iopaint.tests.utils import check_device, get_config, assert_equal
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
@@ -10,8 +10,8 @@ from pathlib import Path
import pytest
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
+from iopaint.schema import HDStrategy, SDSampler, FREEUConfig
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"

View File

@@ -1,15 +1,15 @@
import os
-from lama_cleaner.tests.utils import check_device, current_dir
+from iopaint.tests.utils import check_device, current_dir
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import pytest
import torch
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
+from iopaint.schema import HDStrategy, SDSampler, FREEUConfig
-from lama_cleaner.tests.test_model import get_config, assert_equal
+from iopaint.tests.test_model import get_config, assert_equal
@pytest.mark.parametrize("device", ["cuda", "mps"])

View File

@@ -3,8 +3,8 @@ import cv2
import pytest
import torch
-from lama_cleaner.helper import encode_pil_to_base64
+from iopaint.helper import encode_pil_to_base64
-from lama_cleaner.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
+from iopaint.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
from PIL import Image
current_dir = Path(__file__).parent.absolute().resolve()

View File

@@ -5,7 +5,7 @@ from datetime import datetime
import gradio as gr
from loguru import logger
-from lama_cleaner.const import *
+from iopaint.const import *
_config_file = None

View File

@@ -1,4 +1,4 @@
-from lama_cleaner import entry_point
+from iopaint import entry_point
if __name__ == "__main__":
entry_point()

View File

@@ -4,6 +4,8 @@ set -e
pushd ./web_app
npm run build
popd
+rm -r ./iopaint/web_app
+cp -r web_app/dist ./iopaint/web_app
rm -r -f dist
python3 setup.py sdist bdist_wheel

View File

@@ -6,7 +6,6 @@ safetensors
controlnet-aux==0.0.3
fastapi==0.108.0
python-socketio==5.7.2
-flaskwebgui==0.3.5
typer
pydantic
rich

Some files were not shown because too many files have changed in this diff.