rename to iopaint

.gitignore (2 changes)
@@ -4,7 +4,7 @@ examples/
 .idea/
 .vscode/
 build
-!lama_cleaner/app/build
+!iopaint/app/build
 dist/
 lama_cleaner.egg-info/
 venv/
README.md (76 changes)
@@ -1,75 +1 @@
-<p align="center">
-  <img alt="logo" height=256 src="./assets/logo.png" />
-</p>
-<h1 align="center">Lama Cleaner</h1>
-<p align="center">A free and open-source inpainting tool powered by SOTA AI model.</p>
-
-<p align="center">
-  <a href="https://github.com/Sanster/lama-cleaner">
-    <img alt="total download" src="https://pepy.tech/badge/lama-cleaner" />
-  </a>
-  <a href="https://pypi.org/project/lama-cleaner/">
-    <img alt="version" src="https://img.shields.io/pypi/v/lama-cleaner" />
-  </a>
-  <a href="https://colab.research.google.com/drive/1e3ZkAJxvkK3uzaTGu91N9TvI_Mahs0Wb?usp=sharing">
-    <img alt="Open in Colab" src="https://colab.research.google.com/assets/colab-badge.svg" />
-  </a>
-
-  <a href="https://huggingface.co/spaces/Sanster/Lama-Cleaner-lama">
-    <img alt="Hugging Face Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue" />
-  </a>
-
-  <a href="">
-    <img alt="python version" src="https://img.shields.io/pypi/pyversions/lama-cleaner" />
-  </a>
-  <a href="https://hub.docker.com/r/cwq1913/lama-cleaner">
-    <img alt="version" src="https://img.shields.io/docker/pulls/cwq1913/lama-cleaner" />
-  </a>
-</p>
-
-https://user-images.githubusercontent.com/3998421/196976498-ba1ad3ab-fa18-4c55-965f-5c6683141375.mp4
-
-## Features
-
-- Completely free and open-source, fully self-hosted, support CPU & GPU & M1/2
-- [Windows 1-Click Installer](https://lama-cleaner-docs.vercel.app/install/windows_1click_installer)
-- [Native macOS app](https://opticlean.io/)
-- Multiple SOTA AI [models](https://lama-cleaner-docs.vercel.app/models)
-  - Erase model: LaMa/LDM/ZITS/MAT/FcF/Manga
-  - Erase and Replace model: Stable Diffusion/Paint by Example
-- [Plugins](https://lama-cleaner-docs.vercel.app/plugins) for post-processing:
-  - [RemoveBG](https://github.com/danielgatis/rembg): Remove images background
-  - [RealESRGAN](https://github.com/xinntao/Real-ESRGAN): Super Resolution
-  - [GFPGAN](https://github.com/TencentARC/GFPGAN): Face Restoration
-  - [RestoreFormer](https://github.com/wzhouxiff/RestoreFormer): Face Restoration
-  - [Segment Anything](https://lama-cleaner-docs.vercel.app/plugins#interactive-segmentation): Accurate and fast interactive object segmentation
-  - [FileManager](https://lama-cleaner-docs.vercel.app/features/file_manager): Browse your pictures conveniently and save them directly to the output directory.
-- More features at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/)
-
-## Quick Start
-
-Lama Cleaner make it easy to use SOTA AI model in just two commands:
-
-```bash
-# In order to use the GPU, install cuda version of pytorch first.
-# pip install torch==1.13.1+cu117 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu117
-pip install lama-cleaner
-lama-cleaner --model=lama --device=cpu --port=8080
-```
-
-That's it, Lama Cleaner is now running at http://localhost:8080
-
-See all command line arguments at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/install/pip)
-
-## Development
-
-Only needed if you plan to modify the frontend and recompile yourself.
-
-### Frontend
-
-Frontend code are modified from [cleanup.pictures](https://github.com/initml/cleanup.pictures), You can experience their
-great online services [here](https://cleanup.pictures/).
-
-- Install dependencies:`cd lama_cleaner/app/ && pnpm install`
-- Start development server: `pnpm start`
-- Build: `pnpm build`
+# IOPaint
@@ -10,6 +10,6 @@ warnings.simplefilter("ignore", UserWarning)
 def entry_point():
     # To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers
     # https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18
-    from lama_cleaner.cli import typer_app
+    from iopaint.cli import typer_app
 
     typer_app()
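The deferred import above matters because diffusers resolves `XDG_CACHE_HOME` when it is first imported, so the environment variable has to be set before `iopaint.cli` (whose import chain reaches diffusers, per the comment) is loaded. A minimal sketch of the pattern, with the cache path as an assumed placeholder:

```python
import os


def entry_point():
    # Assumed placeholder; in IOPaint the value comes from args.model_cache_dir.
    os.environ["XDG_CACHE_HOME"] = "/tmp/iopaint-model-cache"

    # Importing only after the env var is set lets diffusers pick up the cache dir.
    from iopaint.cli import typer_app

    typer_app()
```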
@@ -21,8 +21,8 @@ from fastapi.staticfiles import StaticFiles
 from loguru import logger
 from socketio import AsyncServer
 
-from lama_cleaner.file_manager import FileManager
-from lama_cleaner.helper import (
+from iopaint.file_manager import FileManager
+from iopaint.helper import (
     load_img,
     decode_base64_to_image,
     pil_to_bytes,

@@ -31,12 +31,12 @@ from lama_cleaner.helper import (
     gen_frontend_mask,
     adjust_mask,
 )
-from lama_cleaner.model.utils import torch_gc
-from lama_cleaner.model_info import ModelInfo
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.plugins import build_plugins
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import (
+from iopaint.model.utils import torch_gc
+from iopaint.model_info import ModelInfo
+from iopaint.model_manager import ModelManager
+from iopaint.plugins import build_plugins
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.schema import (
     GenInfoResponse,
     ApiConfig,
     ServerConfigResponse,
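The imports above (FastAPI, StaticFiles, python-socketio's AsyncServer) imply the usual ASGI wiring for this kind of server. A generic sketch of that pattern follows; the mount path, CORS policy, and frontend directory are assumptions for illustration, not IOPaint's actual values:

```python
import socketio
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

app = FastAPI()

# Socket.IO server wrapped as an ASGI app alongside the FastAPI routes.
sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*")
asgi_app = socketio.ASGIApp(sio, other_asgi_app=app)

# Serve the built frontend; the directory name here is assumed.
app.mount("/", StaticFiles(directory="iopaint/web_app", html=True), name="web_app")
```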
@@ -356,7 +356,7 @@ class Api:
 
 
 if __name__ == "__main__":
-    from lama_cleaner.schema import InteractiveSegModel, RealESRGANModel
+    from iopaint.schema import InteractiveSegModel, RealESRGANModel
 
     app = FastAPI()
     api = Api(

@@ -17,9 +17,9 @@ from rich.progress import (
     TimeRemainingColumn,
 )
 
-from lama_cleaner.helper import pil_to_bytes
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import InpaintRequest
+from iopaint.helper import pil_to_bytes
+from iopaint.model_manager import ModelManager
+from iopaint.schema import InpaintRequest
 
 
 def glob_images(path: Path) -> Dict[str, Path]:

@@ -9,8 +9,8 @@ import nvidia_smi
 import psutil
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import InpaintRequest, HDStrategy, SDSampler
+from iopaint.model_manager import ModelManager
+from iopaint.schema import InpaintRequest, HDStrategy, SDSampler
 
 try:
     torch._C._jit_override_can_fuse_on_cpu(False)

@@ -6,16 +6,16 @@ from fastapi import FastAPI
 from loguru import logger
 from typer import Option
 
-from lama_cleaner.const import *
-from lama_cleaner.download import cli_download_model, scan_models
-from lama_cleaner.runtime import setup_model_dir, dump_environment_info, check_device
+from iopaint.const import *
+from iopaint.download import cli_download_model, scan_models
+from iopaint.runtime import setup_model_dir, dump_environment_info, check_device
 
 typer_app = typer.Typer(pretty_exceptions_show_locals=False, add_completion=False)
 
 
 @typer_app.command(help="Install all plugins dependencies")
 def install_plugins_packages():
-    from lama_cleaner.installer import install_plugins_package
+    from iopaint.installer import install_plugins_package
 
     install_plugins_package()
 

@@ -67,12 +67,12 @@ def run(
         logger.info(f"{model} not found in {model_dir}, try to downloading")
         cli_download_model(model, model_dir)
 
-    from lama_cleaner.batch_processing import batch_inpaint
+    from iopaint.batch_processing import batch_inpaint
 
     batch_inpaint(model, device, image, mask, output, config, concat)
 
 
-@typer_app.command(help="Start lama cleaner server")
+@typer_app.command(help="Start IOPaint server")
 def start(
     host: str = Option("127.0.0.1"),
     port: int = Option(8080),

@@ -136,8 +136,8 @@ def start(
         logger.info(f"{model} not found in {model_dir}, try to downloading")
         cli_download_model(model, model_dir)
 
-    from lama_cleaner.api import Api
-    from lama_cleaner.schema import ApiConfig
+    from iopaint.api import Api
+    from iopaint.schema import ApiConfig
 
     app = FastAPI()
     api = Api(

@@ -6,21 +6,21 @@ from huggingface_hub.constants import HF_HUB_CACHE
 from loguru import logger
 from pathlib import Path
 
-from lama_cleaner.const import (
+from iopaint.const import (
     DEFAULT_MODEL_DIR,
     DIFFUSERS_SD_CLASS_NAME,
     DIFFUSERS_SD_INPAINT_CLASS_NAME,
     DIFFUSERS_SDXL_CLASS_NAME,
     DIFFUSERS_SDXL_INPAINT_CLASS_NAME,
 )
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.model_info import ModelInfo, ModelType
-from lama_cleaner.runtime import setup_model_dir
+from iopaint.model.utils import handle_from_pretrained_exceptions
+from iopaint.model_info import ModelInfo, ModelType
+from iopaint.runtime import setup_model_dir
 
 
 def cli_download_model(model: str, model_dir: Path):
     setup_model_dir(model_dir)
-    from lama_cleaner.model import models
+    from iopaint.model import models
 
     if model in models and models[model].is_erase_model:
         logger.info(f"Downloading {model}...")
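Given the `cli_download_model(model: str, model_dir: Path)` signature visible above, a minimal usage sketch under the new package name; the model name and the cache directory are assumed values, not defaults taken from the code:

```python
from pathlib import Path

from iopaint.download import cli_download_model

# "lama" is one of the erase models listed in the old README; the directory
# is a placeholder chosen for this example.
cli_download_model("lama", Path.home() / ".cache" / "iopaint")
```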
@@ -85,7 +85,7 @@ def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]:
 
 def scan_inpaint_models(model_dir: Path) -> List[ModelInfo]:
     res = []
-    from lama_cleaner.model import models
+    from iopaint.model import models
 
     # logger.info(f"Scanning inpaint models in {model_dir}")
 

@@ -10,7 +10,7 @@ import cv2
 from PIL import Image, ImageOps, PngImagePlugin
 import numpy as np
 import torch
-from lama_cleaner.const import MPS_UNSUPPORT_MODELS
+from iopaint.const import MPS_UNSUPPORT_MODELS
 from loguru import logger
 from torch.hub import download_url_to_file, get_dir
 import hashlib

@@ -56,12 +56,12 @@ def download_model(url, model_md5: str = None):
         try:
             os.remove(cached_file)
             logger.error(
-                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
+                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
                 f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
             )
         except:
             logger.error(
-                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner."
+                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint."
             )
         exit(-1)
 

@@ -80,12 +80,12 @@ def handle_error(model_path, model_md5, e):
         try:
             os.remove(model_path)
             logger.error(
-                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
+                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint."
                 f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
             )
         except:
             logger.error(
-                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner."
+                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint."
            )
     else:
         logger.error(
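Both hunks above only change the product name in the error strings; the underlying logic compares a freshly computed md5 against the expected one and deletes the corrupt file. A self-contained sketch of that check, where the helper names and chunk size are assumptions rather than IOPaint's actual helpers:

```python
import hashlib
from pathlib import Path


def file_md5(path: Path, chunk_size: int = 1 << 20) -> str:
    """Compute the md5 hex digest of a file without loading it all into memory."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def verify_model(path: Path, expected_md5: str) -> bool:
    # Deleting and re-downloading on mismatch is left to the caller,
    # mirroring the pattern in the hunks above.
    return file_md5(path) == expected_md5
```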
@@ -6,15 +6,15 @@ import torch
 import numpy as np
 from loguru import logger
 
-from lama_cleaner.helper import (
+from iopaint.helper import (
     boxes_from_mask,
     resize_max_size,
     pad_img_to_modulo,
     switch_mps_device,
 )
-from lama_cleaner.model.helper.g_diffuser_bot import expand_image
-from lama_cleaner.model.utils import get_scheduler
-from lama_cleaner.schema import InpaintRequest, HDStrategy, SDSampler
+from iopaint.model.helper.g_diffuser_bot import expand_image
+from iopaint.model.utils import get_scheduler
+from iopaint.schema import InpaintRequest, HDStrategy, SDSampler
 
 
 class InpaintModel:

@@ -5,16 +5,16 @@ import torch
 from diffusers import ControlNetModel, DiffusionPipeline
 from loguru import logger
 
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.model.helper.controlnet_preprocess import (
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.model.helper.controlnet_preprocess import (
     make_canny_control_image,
     make_openpose_control_image,
     make_depth_control_image,
     make_inpaint_control_image,
 )
-from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
-from lama_cleaner.model.utils import get_scheduler, handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest, ModelType
+from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
+from iopaint.model.utils import get_scheduler, handle_from_pretrained_exceptions
+from iopaint.schema import InpaintRequest, ModelType
 
 
 class ControlNet(DiffusionInpaintModel):

@@ -2,7 +2,7 @@ import torch
 import numpy as np
 from tqdm import tqdm
 
-from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
+from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
 
 from loguru import logger
 

@@ -6,9 +6,9 @@ import torch
 import numpy as np
 import torch.fft as fft
 
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
 
-from lama_cleaner.helper import (
+from iopaint.helper import (
     load_model,
     get_cache_path_by_url,
     norm_img,

@@ -16,11 +16,11 @@ from lama_cleaner.helper import (
     resize_max_size,
     download_model,
 )
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
 from torch import conv2d, nn
 import torch.nn.functional as F
 
-from lama_cleaner.model.utils import (
+from iopaint.model.utils import (
     setup_filter,
     _parse_scaling,
     _parse_padding,

@@ -1,5 +1,5 @@
 import torch
-from lama_cleaner.model.utils import torch_gc
+from iopaint.model.utils import torch_gc
 
 
 class CPUTextEncoderWrapper(torch.nn.Module):

@@ -3,8 +3,8 @@ import cv2
 import torch
 from loguru import logger
 
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.schema import InpaintRequest
 
 
 class InstructPix2Pix(DiffusionInpaintModel):

@@ -3,9 +3,9 @@ import cv2
 import numpy as np
 import torch
 
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.model.utils import get_scheduler
-from lama_cleaner.schema import InpaintRequest
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.model.utils import get_scheduler
+from iopaint.schema import InpaintRequest
 
 
 class Kandinsky(DiffusionInpaintModel):

@@ -4,14 +4,14 @@ import cv2
 import numpy as np
 import torch
 
-from lama_cleaner.helper import (
+from iopaint.helper import (
     norm_img,
     get_cache_path_by_url,
     load_jit_model,
     download_model,
 )
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.model.base import InpaintModel
+from iopaint.schema import InpaintRequest
 
 LAMA_MODEL_URL = os.environ.get(
     "LAMA_MODEL_URL",

@@ -4,20 +4,20 @@ import numpy as np
 import torch
 from loguru import logger
 
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.model.ddim_sampler import DDIMSampler
-from lama_cleaner.model.plms_sampler import PLMSSampler
-from lama_cleaner.schema import InpaintRequest, LDMSampler
+from iopaint.model.base import InpaintModel
+from iopaint.model.ddim_sampler import DDIMSampler
+from iopaint.model.plms_sampler import PLMSSampler
+from iopaint.schema import InpaintRequest, LDMSampler
 
 torch.manual_seed(42)
 import torch.nn as nn
-from lama_cleaner.helper import (
+from iopaint.helper import (
     download_model,
     norm_img,
     get_cache_path_by_url,
     load_jit_model,
 )
-from lama_cleaner.model.utils import (
+from iopaint.model.utils import (
     make_beta_schedule,
     timestep_embedding,
 )

@@ -7,9 +7,9 @@ import torch
 import time
 from loguru import logger
 
-from lama_cleaner.helper import get_cache_path_by_url, load_jit_model, download_model
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
+from iopaint.model.base import InpaintModel
+from iopaint.schema import InpaintRequest
 
 
 MANGA_INPAINTOR_MODEL_URL = os.environ.get(

@@ -8,14 +8,14 @@ import torch.nn as nn
 import torch.nn.functional as F
 import torch.utils.checkpoint as checkpoint
 
-from lama_cleaner.helper import (
+from iopaint.helper import (
     load_model,
     get_cache_path_by_url,
     norm_img,
     download_model,
 )
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.model.utils import (
+from iopaint.model.base import InpaintModel
+from iopaint.model.utils import (
     setup_filter,
     Conv2dLayer,
     FullyConnectedLayer,

@@ -28,7 +28,7 @@ from lama_cleaner.model.utils import (
     normalize_2nd_moment,
     set_seed,
 )
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
 
 
 class ModulatedConv2d(nn.Module):

@@ -3,7 +3,7 @@ import os
 import cv2
 import torch
 
-from lama_cleaner.helper import (
+from iopaint.helper import (
     load_jit_model,
     download_model,
     get_cache_path_by_url,

@@ -11,8 +11,8 @@ from lama_cleaner.helper import (
     resize_max_size,
     norm_img,
 )
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.model.base import InpaintModel
+from iopaint.schema import InpaintRequest
 
 MIGAN_MODEL_URL = os.environ.get(
     "MIGAN_MODEL_URL",

@@ -1,6 +1,6 @@
 import cv2
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.model.base import InpaintModel
+from iopaint.schema import InpaintRequest
 
 flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA}
 
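The `flag_map` above selects between OpenCV's two classical inpainting algorithms. A minimal sketch of the underlying call, with a toy image, mask, and radius chosen purely for illustration:

```python
import cv2
import numpy as np

# Toy inputs: a black image with a square "hole" marked in the mask.
image = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:32, 16:32] = 255

# cv2.INPAINT_NS and cv2.INPAINT_TELEA correspond to the flag_map entries above.
result = cv2.inpaint(image, mask, 3, cv2.INPAINT_NS)
print(result.shape)
```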
@@ -4,9 +4,9 @@ import cv2
 import torch
 from loguru import logger
 
-from lama_cleaner.helper import decode_base64_to_image
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.schema import InpaintRequest
+from iopaint.helper import decode_base64_to_image
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.schema import InpaintRequest
 
 
 class PaintByExample(DiffusionInpaintModel):

@@ -1,7 +1,7 @@
 # From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py
 import torch
 import numpy as np
-from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
+from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
 from tqdm import tqdm
 
 

@@ -4,10 +4,10 @@ import cv2
 import torch
 from loguru import logger
 
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
+from iopaint.model.utils import handle_from_pretrained_exceptions
+from iopaint.schema import InpaintRequest
 from .powerpaint_tokenizer import add_task_to_prompt
 
 

@@ -5,7 +5,7 @@ import random
 from typing import Any, List, Optional, Union
 from transformers import CLIPTokenizer
 
-from lama_cleaner.schema import PowerPaintTask
+from iopaint.schema import PowerPaintTask
 
 
 def add_task_to_prompt(prompt, negative_prompt, task: PowerPaintTask):

@@ -3,10 +3,10 @@ import cv2
 import torch
 from loguru import logger
 
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest, ModelType
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
+from iopaint.model.utils import handle_from_pretrained_exceptions
+from iopaint.schema import InpaintRequest, ModelType
 
 
 class SD(DiffusionInpaintModel):

@@ -6,9 +6,9 @@ import torch
 from diffusers import AutoencoderKL
 from loguru import logger
 
-from lama_cleaner.model.base import DiffusionInpaintModel
-from lama_cleaner.model.utils import handle_from_pretrained_exceptions
-from lama_cleaner.schema import InpaintRequest, ModelType
+from iopaint.model.base import DiffusionInpaintModel
+from iopaint.model.utils import handle_from_pretrained_exceptions
+from iopaint.schema import InpaintRequest, ModelType
 
 
 class SDXL(DiffusionInpaintModel):

@@ -27,7 +27,7 @@ from diffusers import (
 from diffusers.configuration_utils import FrozenDict
 from loguru import logger
 
-from lama_cleaner.schema import SDSampler
+from iopaint.schema import SDSampler
 from torch import conv2d, conv_transpose2d
 
 

@@ -5,11 +5,11 @@ import cv2
 import torch
 import torch.nn.functional as F
 
-from lama_cleaner.helper import get_cache_path_by_url, load_jit_model, download_model
-from lama_cleaner.schema import InpaintRequest
+from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
+from iopaint.schema import InpaintRequest
 import numpy as np
 
-from lama_cleaner.model.base import InpaintModel
+from iopaint.model.base import InpaintModel
 
 ZITS_INPAINT_MODEL_URL = os.environ.get(
     "ZITS_INPAINT_MODEL_URL",

@@ -2,13 +2,13 @@ from typing import List
 
 from pydantic import computed_field, BaseModel
 
-from lama_cleaner.const import (
+from iopaint.const import (
     SDXL_CONTROLNET_CHOICES,
     SD2_CONTROLNET_CHOICES,
     SD_CONTROLNET_CHOICES,
 )
-from lama_cleaner.model import InstructPix2Pix, Kandinsky22, PowerPaint, SD2
-from lama_cleaner.schema import ModelType
+from iopaint.model import InstructPix2Pix, Kandinsky22, PowerPaint, SD2
+from iopaint.schema import ModelType
 
 
 class ModelInfo(BaseModel):
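`ModelInfo` builds on pydantic's `computed_field`, imported above. A generic sketch of that pattern; the field names and the heuristic are assumptions for illustration, not IOPaint's actual schema:

```python
from pydantic import BaseModel, computed_field


class ModelInfoSketch(BaseModel):
    name: str
    path: str

    @computed_field  # serialized by .model_dump() like a declared field
    @property
    def is_diffusers_model(self) -> bool:
        # Assumed heuristic: repo-style names contain a slash.
        return "/" in self.name


info = ModelInfoSketch(name="runwayml/stable-diffusion-inpainting", path="~/.cache")
print(info.model_dump())
```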
@@ -4,12 +4,12 @@ import torch
 from loguru import logger
 import numpy as np
 
-from lama_cleaner.download import scan_models
-from lama_cleaner.helper import switch_mps_device
-from lama_cleaner.model import models, ControlNet, SD, SDXL
-from lama_cleaner.model.utils import torch_gc
-from lama_cleaner.model_info import ModelInfo, ModelType
-from lama_cleaner.schema import InpaintRequest
+from iopaint.download import scan_models
+from iopaint.helper import switch_mps_device
+from iopaint.model import models, ControlNet, SD, SDXL
+from iopaint.model.utils import torch_gc
+from iopaint.model_info import ModelInfo, ModelType
+from iopaint.schema import InpaintRequest
 
 
 class ModelManager:

@@ -5,9 +5,9 @@ import torch.nn.functional as F
 import numpy as np
 from PIL import Image
 
-from lama_cleaner.helper import load_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.helper import load_model
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.schema import RunPluginRequest
 
 
 class REBNCONV(nn.Module):

@@ -1,7 +1,7 @@
 from loguru import logger
 import numpy as np
 
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.schema import RunPluginRequest
 
 
 class BasePlugin:

@@ -2,9 +2,9 @@ import cv2
 import numpy as np
 from loguru import logger
 
-from lama_cleaner.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.helper import download_model
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.schema import RunPluginRequest
 
 
 class GFPGANPlugin(BasePlugin):

@@ -7,10 +7,10 @@ import numpy as np
 import torch
 from loguru import logger
 
-from lama_cleaner.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.plugins.segment_anything import SamPredictor, sam_model_registry
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.helper import download_model
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.plugins.segment_anything import SamPredictor, sam_model_registry
+from iopaint.schema import RunPluginRequest
 
 # 从小到大 (smallest to largest)
 SEGMENT_ANYTHING_MODELS = {
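The interactive-segmentation plugin above relies on the vendored Segment Anything `SamPredictor` / `sam_model_registry` API. A usage sketch of that predictor; the registry key, checkpoint filename, and click coordinate are assumed values:

```python
import numpy as np

from iopaint.plugins.segment_anything import SamPredictor, sam_model_registry

# Assumed checkpoint file; SEGMENT_ANYTHING_MODELS above presumably maps
# model names onto the real download URLs.
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")
predictor = SamPredictor(sam)

predictor.set_image(np.zeros((512, 512, 3), dtype=np.uint8))  # RGB image
masks, scores, logits = predictor.predict(
    point_coords=np.array([[256, 256]]),  # a single foreground click
    point_labels=np.array([1]),
    multimask_output=False,
)
```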
@@ -5,10 +5,10 @@ import numpy as np
 import torch
 from loguru import logger
 
-from lama_cleaner.const import RealESRGANModel
-from lama_cleaner.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.const import RealESRGANModel
+from iopaint.helper import download_model
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.schema import RunPluginRequest
 
 
 class RealESRGANUpscaler(BasePlugin):

@@ -3,8 +3,8 @@ import cv2
 import numpy as np
 from torch.hub import get_dir
 
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.schema import RunPluginRequest
 
 
 class RemoveBG(BasePlugin):

@@ -2,9 +2,9 @@ import cv2
 import numpy as np
 from loguru import logger
 
-from lama_cleaner.helper import download_model
-from lama_cleaner.plugins.base_plugin import BasePlugin
-from lama_cleaner.schema import RunPluginRequest
+from iopaint.helper import download_model
+from iopaint.plugins.base_plugin import BasePlugin
+from iopaint.schema import RunPluginRequest
 
 
 class RestoreFormerPlugin(BasePlugin):

@@ -8,7 +8,7 @@ import torch
 
 from functools import partial
 
-from lama_cleaner.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
+from iopaint.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
 
 from .modeling import (
     ImageEncoderViT,

@@ -9,7 +9,7 @@ from loguru import logger
 from rich import print
 from typing import Dict, Any
 
-from lama_cleaner.const import Device
+from iopaint.const import Device
 
 _PY_VERSION: str = sys.version.split()[0].rstrip("+")
 

@@ -28,7 +28,7 @@ _CANDIDATES = [
     "transformers",
     "opencv-python",
     "accelerate",
-    "lama-cleaner",
+    "iopaint",
     "rembg",
     "realesrgan",
     "gfpgan",
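`_CANDIDATES` above feeds the environment report, which now looks up `iopaint` instead of `lama-cleaner`. A rough sketch of how such a version dump can be built with `importlib.metadata`; the function name is an assumption, and only the packages visible in the hunk are listed:

```python
from importlib.metadata import PackageNotFoundError, version

_CANDIDATES = ["transformers", "opencv-python", "accelerate", "iopaint", "rembg", "realesrgan", "gfpgan"]


def dump_environment_versions() -> dict:
    versions = {}
    for package in _CANDIDATES:
        try:
            versions[package] = version(package)
        except PackageNotFoundError:
            versions[package] = "not installed"
    return versions


print(dump_environment_versions())
```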
@@ -5,7 +5,7 @@ from typing import Optional, Literal, List
 
 from pydantic import BaseModel, Field, field_validator
 
-from lama_cleaner.const import Device, InteractiveSegModel, RealESRGANModel
+from iopaint.const import Device, InteractiveSegModel, RealESRGANModel
 
 
 class PluginInfo(BaseModel):
[11 binary image assets; before/after sizes unchanged: 480 KiB, 51 KiB, 481 KiB, 215 KiB, 305 KiB, 129 KiB, 7.7 KiB, 395 KiB, 12 KiB, 38 KiB, 69 KiB]
@@ -1,6 +1,6 @@
 import cv2
-from lama_cleaner.helper import adjust_mask
-from lama_cleaner.tests.utils import current_dir, save_dir
+from iopaint.helper import adjust_mask
+from iopaint.tests.utils import current_dir, save_dir
 
 mask_p = current_dir / "overture-creations-5sI6fQgYIuo_mask.png"
 

@@ -1,7 +1,7 @@
 import os
 
-from lama_cleaner.const import SD_CONTROLNET_CHOICES
-from lama_cleaner.tests.utils import current_dir, check_device, get_config, assert_equal
+from iopaint.const import SD_CONTROLNET_CHOICES
+from iopaint.tests.utils import current_dir, check_device, get_config, assert_equal
 
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path

@@ -9,8 +9,8 @@ from pathlib import Path
 import pytest
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy, SDSampler
 
 
 model_name = "runwayml/stable-diffusion-inpainting"

@@ -3,9 +3,9 @@ from pathlib import Path
 import pytest
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy
-from lama_cleaner.tests.utils import get_config, check_device, assert_equal, current_dir
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy
+from iopaint.tests.utils import get_config, check_device, assert_equal, current_dir
 
 model_name = "timbrooks/instruct-pix2pix"
 

@@ -1,5 +1,5 @@
-from lama_cleaner.helper import load_img
-from lama_cleaner.tests.utils import current_dir
+from iopaint.helper import load_img
+from iopaint.tests.utils import current_dir
 
 png_img_p = current_dir / "image.png"
 jpg_img_p = current_dir / "bunny.jpeg"

@@ -1,9 +1,9 @@
 import pytest
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, LDMSampler
-from lama_cleaner.tests.utils import assert_equal, get_config, current_dir, check_device
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy, LDMSampler
+from iopaint.tests.utils import assert_equal, get_config, current_dir, check_device
 
 
 @pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])

@@ -1,6 +1,6 @@
 def test_load_model():
-    from lama_cleaner.plugins import InteractiveSeg
-    from lama_cleaner.model_manager import ModelManager
+    from iopaint.plugins import InteractiveSeg
+    from iopaint.model_manager import ModelManager
 
     interactive_seg_model = InteractiveSeg("vit_l", "cpu")
 

@@ -1,12 +1,12 @@
 import os
 
-from lama_cleaner.schema import InpaintRequest
+from iopaint.schema import InpaintRequest
 
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
 import torch
 
-from lama_cleaner.model_manager import ModelManager
+from iopaint.model_manager import ModelManager
 
 
 def test_model_switch():

@@ -1,6 +1,6 @@
 import os
 
-from lama_cleaner.tests.utils import current_dir, check_device
+from iopaint.tests.utils import current_dir, check_device
 
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path

@@ -8,9 +8,9 @@ from pathlib import Path
 import pytest
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler
-from lama_cleaner.tests.test_model import get_config, assert_equal
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy, SDSampler
+from iopaint.tests.test_model import get_config, assert_equal
 
 
 @pytest.mark.parametrize("name", ["runwayml/stable-diffusion-inpainting"])

@@ -2,9 +2,9 @@ import cv2
 import pytest
 from PIL import Image
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy
-from lama_cleaner.tests.utils import (
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy
+from iopaint.tests.utils import (
     current_dir,
     get_config,
     get_data,

@@ -3,17 +3,17 @@ import os
 import time
 from PIL import Image
 
-from lama_cleaner.helper import encode_pil_to_base64, gen_frontend_mask
-from lama_cleaner.plugins.anime_seg import AnimeSeg
-from lama_cleaner.schema import RunPluginRequest
-from lama_cleaner.tests.utils import check_device, current_dir, save_dir
+from iopaint.helper import encode_pil_to_base64, gen_frontend_mask
+from iopaint.plugins.anime_seg import AnimeSeg
+from iopaint.schema import RunPluginRequest
+from iopaint.tests.utils import check_device, current_dir, save_dir
 
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
 import cv2
 import pytest
 
-from lama_cleaner.plugins import (
+from iopaint.plugins import (
     RemoveBG,
     RealESRGANUpscaler,
     GFPGANPlugin,

@@ -5,7 +5,7 @@ from typing import List
 
 from PIL import Image
 
-from lama_cleaner.helper import pil_to_bytes, load_img
+from iopaint.helper import pil_to_bytes, load_img
 
 current_dir = Path(__file__).parent.absolute().resolve()
 

@@ -2,7 +2,7 @@ import os
 
 from loguru import logger
 
-from lama_cleaner.tests.utils import check_device, get_config, assert_equal
+from iopaint.tests.utils import check_device, get_config, assert_equal
 
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path

@@ -10,8 +10,8 @@ from pathlib import Path
 import pytest
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy, SDSampler, FREEUConfig
 
 current_dir = Path(__file__).parent.absolute().resolve()
 save_dir = current_dir / "result"

@@ -1,15 +1,15 @@
 import os
 
-from lama_cleaner.tests.utils import check_device, current_dir
+from iopaint.tests.utils import check_device, current_dir
 
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
 import pytest
 import torch
 
-from lama_cleaner.model_manager import ModelManager
-from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
-from lama_cleaner.tests.test_model import get_config, assert_equal
+from iopaint.model_manager import ModelManager
+from iopaint.schema import HDStrategy, SDSampler, FREEUConfig
+from iopaint.tests.test_model import get_config, assert_equal
 
 
 @pytest.mark.parametrize("device", ["cuda", "mps"])

@@ -3,8 +3,8 @@ import cv2
 import pytest
 import torch
 
-from lama_cleaner.helper import encode_pil_to_base64
-from lama_cleaner.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
+from iopaint.helper import encode_pil_to_base64
+from iopaint.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
 from PIL import Image
 
 current_dir = Path(__file__).parent.absolute().resolve()

@@ -5,7 +5,7 @@ from datetime import datetime
 import gradio as gr
 from loguru import logger
 
-from lama_cleaner.const import *
+from iopaint.const import *
 
 _config_file = None
 
main.py (2 changes)
@@ -1,4 +1,4 @@
-from lama_cleaner import entry_point
+from iopaint import entry_point
 
 if __name__ == "__main__":
     entry_point()
@@ -4,6 +4,8 @@ set -e
 pushd ./web_app
 npm run build
 popd
+rm -r ./iopaint/web_app
+cp -r web_app/dist ./iopaint/web_app
 
 rm -r -f dist
 python3 setup.py sdist bdist_wheel
@@ -6,7 +6,6 @@ safetensors
 controlnet-aux==0.0.3
 fastapi==0.108.0
 python-socketio==5.7.2
-flaskwebgui==0.3.5
 typer
 pydantic
 rich