diff --git a/.gitignore b/.gitignore
index d166b2a..e165890 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,9 @@ examples/
 .idea/
 .vscode/
 build
-!lama_cleaner/app/build
+!iopaint/app/build
 dist/
-lama_cleaner.egg-info/
+IOPaint.egg-info/
 venv/
+tmp/
+iopaint/web_app/
diff --git a/README.md b/README.md
index e82a12a..5b3587d 100644
--- a/README.md
+++ b/README.md
@@ -1,76 +1,75 @@

-[logo image]
-
-Lama Cleaner
-
-A free and open-source inpainting tool powered by SOTA AI model.
+
+IOPaint
+
+A free and open-source inpainting & outpainting tool powered by SOTA AI models.

-[badges: total download, version, Open in Colab, Hugging Face Spaces, python version]
+[badges: total download, version]

-https://user-images.githubusercontent.com/3998421/196976498-ba1ad3ab-fa18-4c55-965f-5c6683141375.mp4
-
-## Features
-
-- Completely free and open-source, fully self-hosted, support CPU & GPU & M1/2
-- [Windows 1-Click Installer](https://lama-cleaner-docs.vercel.app/install/windows_1click_installer)
-- [Native macOS app](https://opticlean.io/)
-- Multiple SOTA AI [models](https://lama-cleaner-docs.vercel.app/models)
-  - Erase model: LaMa/LDM/ZITS/MAT/FcF/Manga
-  - Erase and Replace model: Stable Diffusion/Paint by Example
-- [Plugins](https://lama-cleaner-docs.vercel.app/plugins) for post-processing:
-  - [RemoveBG](https://lama-cleaner-docs.vercel.app/plugins/rembg): Remove images background
-  - [RealESRGAN](https://lama-cleaner-docs.vercel.app/plugins/RealESRGAN): Super Resolution
-  - [GFPGAN](https://lama-cleaner-docs.vercel.app/plugins/GFPGAN): Face Restoration
-  - [RestoreFormer](https://lama-cleaner-docs.vercel.app/plugins/RestoreFormer): Face Restoration
-  - [Segment Anything](https://lama-cleaner-docs.vercel.app/plugins/interactive_seg): Accurate and fast interactive object segmentation
-- [FileManager](https://lama-cleaner-docs.vercel.app/features/file_manager): Browse your pictures conveniently and save them directly to the output directory.
-- [Docker Image](https://lama-cleaner-docs.vercel.app/install/docker)
-- More features at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/)
+[badge: python version]

 ## Quick Start
-Lama Cleaner make it easy to use SOTA AI model in just two commands:
+
+### Start webui
+
+IOPaint provides a convenient webui for using the latest AI models to edit your images.
+You can install and start IOPaint easily by running the following command:

 ```bash
-# In order to use the GPU, install cuda version of pytorch first.
-# pip install torch==1.13.1+cu117 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu117
-pip install lama-cleaner
-lama-cleaner --model=lama --device=cpu --port=8080
+# To use a GPU, install the CUDA build of PyTorch first.
+# pip3 install torch==2.1.2 torchvision==0.16.2 --index-url https://download.pytorch.org/whl/cu118
+# AMD GPU users: use the following command instead. It only works on Linux, as PyTorch does not yet support ROCm on Windows.
+# pip3 install torch==2.1.2 torchvision==0.16.2 --index-url https://download.pytorch.org/whl/rocm5.6
+
+pip3 install iopaint
+iopaint start --model=lama --device=cpu --port=8080
 ```

-That's it, Lama Cleaner is now running at http://localhost:8080
+That's it, you can start using IOPaint by visiting http://localhost:8080 in your web browser.
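The same functionality is exposed over HTTP by `iopaint/api.py` in this patch. The snippet below is an illustrative sketch and is not part of the original README: it assumes the server started above is running on port 8080, that the `requests` package is installed, and that `image.png`/`mask.png` are placeholder file names. The `image` and `mask` fields are base64-encoded, matching what the `/api/v1/inpaint` route decodes; the remaining `InpaintRequest` fields keep their defaults, just as the batch code constructs `InpaintRequest()` without arguments.

```python
# Illustrative only: POST a base64-encoded image and mask to a running IOPaint
# server and save the inpainted result, which is returned as raw image bytes.
import base64

import requests


def to_b64(path: str) -> str:
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")


resp = requests.post(
    "http://localhost:8080/api/v1/inpaint",
    json={"image": to_b64("image.png"), "mask": to_b64("mask.png")},
)
resp.raise_for_status()

with open("result.png", "wb") as fw:
    fw.write(resp.content)  # response body is the result image
```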
-See all command line arguments at [lama-cleaner-docs](https://lama-cleaner-docs.vercel.app/install/pip)
+### Batch processing

-## Development
+You can also use IOPaint in the command line to batch process images:

-Only needed if you plan to modify the frontend and recompile yourself.
+```bash
+iopaint run --model=lama --device=cpu \
+--input=/path/to/image_folder \
+--mask=/path/to/mask_folder \
+--output=output_dir
+```

-### Frontend
+`--input` is the folder containing the input images, and `--mask` is the folder containing the corresponding mask images; masks are matched to images by file name.
+When `--mask` points to a single mask file, that mask is applied to all images.
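For reference only (this block is not part of the original README): `iopaint run` is a thin wrapper around `iopaint/batch_processing.py`, so the same result can be obtained programmatically. The sketch below mirrors what that module does for a single image/mask pair; the file names are hypothetical, and it assumes a plain `"cpu"` device string is accepted, since the CLI passes its `--device` option straight through to `ModelManager`.

```python
# Illustrative sketch mirroring iopaint/batch_processing.py: inpaint one
# image/mask pair using the default InpaintRequest settings.
import cv2

from iopaint.model_manager import ModelManager
from iopaint.schema import InpaintRequest

model_manager = ModelManager(name="lama", device="cpu")

# cv2.imread returns BGR; the model manager is fed RGB, as in batch_processing.py.
image = cv2.cvtColor(cv2.imread("image_folder/photo_1.jpg"), cv2.COLOR_BGR2RGB)
mask = cv2.imread("mask_folder/photo_1.png", cv2.IMREAD_GRAYSCALE)

result = model_manager(image, mask, InpaintRequest())  # result comes back as BGR
cv2.imwrite("output_dir/photo_1.png", result)
```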
-Frontend code are modified from [cleanup.pictures](https://github.com/initml/cleanup.pictures), You can experience their
-great online services [here](https://cleanup.pictures/).
+You can see more information about the available models and plugins supported by IOPaint below.

-- Install dependencies:`cd lama_cleaner/app/ && pnpm install`
-- Start development server: `pnpm start`
-- Build: `pnpm build`
+## Features
+
+- Completely free and open-source, fully self-hosted, supports CPU, GPU & Apple Silicon
+- Supports various AI models:
+  - Erase models: These models can be used to remove unwanted objects, defects, watermarks, or people from an image.
+  - Stable Diffusion models: You can use any Stable Diffusion Inpainting (or normal) model from [Huggingface](https://huggingface.co/models?other=stable-diffusion) in IOPaint.
+    Some popular models include:
+    - [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)
+    - [diffusers/stable-diffusion-xl-1.0-inpainting-0.1](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1)
+    - [andregn/Realistic_Vision_V3.0-inpainting](https://huggingface.co/andregn/Realistic_Vision_V3.0-inpainting)
+    - [Lykon/dreamshaper-8-inpainting](https://huggingface.co/Lykon/dreamshaper-8-inpainting)
+    - [Sanster/anything-4.0-inpainting](https://huggingface.co/Sanster/anything-4.0-inpainting)
+    - [Sanster/PowerPaint-V1-stable-diffusion-inpainting](https://huggingface.co/Sanster/PowerPaint-V1-stable-diffusion-inpainting)
+  - Other Diffusion models:
+    - [Sanster/AnyText](https://huggingface.co/Sanster/AnyText)
+    - [timbrooks/instruct-pix2pix](https://huggingface.co/timbrooks/instruct-pix2pix)
+    - [Fantasy-Studio/Paint-by-Example](https://huggingface.co/Fantasy-Studio/Paint-by-Example)
+    - [kandinsky-community/kandinsky-2-2-decoder-inpaint](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder-inpaint)
+- Plugins
+  - [Segment Anything](https://iopaint.com/plugins/interactive_seg): Accurate and fast interactive object segmentation
+  - [RemoveBG](https://iopaint.com/plugins/rembg): Remove image background or generate masks for foreground objects
+  - [Anime Segmentation](https://iopaint.com/plugins/anime_seg): Similar to RemoveBG; the model is specifically trained for anime images.
+  - [RealESRGAN](https://iopaint.com/plugins/RealESRGAN): Super Resolution
+  - [GFPGAN](https://iopaint.com/plugins/GFPGAN): Face Restoration
+  - [RestoreFormer](https://iopaint.com/plugins/RestoreFormer): Face Restoration
+- [FileManager](https://iopaint.com/features/file_manager): Browse your pictures conveniently and save them directly to the output directory.
+- [Native macOS app](https://opticlean.io/) for erase task diff --git a/assets/GitHub_Copilot_logo.svg b/assets/GitHub_Copilot_logo.svg deleted file mode 100644 index 24b6613..0000000 --- a/assets/GitHub_Copilot_logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/assets/dark.jpg b/assets/dark.jpg deleted file mode 100644 index e0248f6..0000000 Binary files a/assets/dark.jpg and /dev/null differ diff --git a/assets/logo.png b/assets/logo.png deleted file mode 100644 index ecbb5b0..0000000 Binary files a/assets/logo.png and /dev/null differ diff --git a/iopaint/__init__.py b/iopaint/__init__.py new file mode 100644 index 0000000..d3049f8 --- /dev/null +++ b/iopaint/__init__.py @@ -0,0 +1,23 @@ +import os + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +# https://github.com/pytorch/pytorch/issues/27971#issuecomment-1768868068 +os.environ["ONEDNN_PRIMITIVE_CACHE_CAPACITY"] = "1" +os.environ["LRU_CACHE_CAPACITY"] = "1" +# prevent CPU memory leak when run model on GPU +# https://github.com/pytorch/pytorch/issues/98688#issuecomment-1869288431 +# https://github.com/pytorch/pytorch/issues/108334#issuecomment-1752763633 +os.environ["TORCH_CUDNN_V8_API_LRU_CACHE_LIMIT"] = "1" + + +import warnings + +warnings.simplefilter("ignore", UserWarning) + + +def entry_point(): + # To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers + # https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18 + from iopaint.cli import typer_app + + typer_app() diff --git a/lama_cleaner/__main__.py b/iopaint/__main__.py similarity index 55% rename from lama_cleaner/__main__.py rename to iopaint/__main__.py index f57b35f..3b76d32 100644 --- a/lama_cleaner/__main__.py +++ b/iopaint/__main__.py @@ -1,4 +1,4 @@ -from lama_cleaner import entry_point +from iopaint import entry_point if __name__ == "__main__": entry_point() diff --git a/iopaint/api.py b/iopaint/api.py new file mode 100644 index 0000000..a69a7f8 --- /dev/null +++ b/iopaint/api.py @@ -0,0 +1,400 @@ +import asyncio +import os +import threading +import time +import traceback +from pathlib import Path +from typing import Optional, Dict, List + +import cv2 +import numpy as np +import socketio +import torch + +try: + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(False) +except: + pass + + +import uvicorn +from PIL import Image +from fastapi import APIRouter, FastAPI, Request, UploadFile +from fastapi.encoders import jsonable_encoder +from fastapi.exceptions import HTTPException +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse, FileResponse, Response +from fastapi.staticfiles import StaticFiles +from loguru import logger +from socketio import AsyncServer + +from iopaint.file_manager import FileManager +from iopaint.helper import ( + load_img, + decode_base64_to_image, + pil_to_bytes, + numpy_to_bytes, + concat_alpha_channel, + gen_frontend_mask, + adjust_mask, +) +from iopaint.model.utils import torch_gc +from iopaint.model_info import ModelInfo +from iopaint.model_manager import ModelManager +from iopaint.plugins import build_plugins +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import ( + GenInfoResponse, + ApiConfig, + ServerConfigResponse, + SwitchModelRequest, + InpaintRequest, + RunPluginRequest, + SDSampler, + PluginInfo, + AdjustMaskRequest, 
+) + +CURRENT_DIR = Path(__file__).parent.absolute().resolve() +WEB_APP_DIR = CURRENT_DIR / "web_app" + + +def api_middleware(app: FastAPI): + rich_available = False + try: + if os.environ.get("WEBUI_RICH_EXCEPTIONS", None) is not None: + import anyio # importing just so it can be placed on silent list + import starlette # importing just so it can be placed on silent list + from rich.console import Console + + console = Console() + rich_available = True + except Exception: + pass + + def handle_exception(request: Request, e: Exception): + err = { + "error": type(e).__name__, + "detail": vars(e).get("detail", ""), + "body": vars(e).get("body", ""), + "errors": str(e), + } + if not isinstance( + e, HTTPException + ): # do not print backtrace on known httpexceptions + message = f"API error: {request.method}: {request.url} {err}" + if rich_available: + print(message) + console.print_exception( + show_locals=True, + max_frames=2, + extra_lines=1, + suppress=[anyio, starlette], + word_wrap=False, + width=min([console.width, 200]), + ) + else: + traceback.print_exc() + return JSONResponse( + status_code=vars(e).get("status_code", 500), content=jsonable_encoder(err) + ) + + @app.middleware("http") + async def exception_handling(request: Request, call_next): + try: + return await call_next(request) + except Exception as e: + return handle_exception(request, e) + + @app.exception_handler(Exception) + async def fastapi_exception_handler(request: Request, e: Exception): + return handle_exception(request, e) + + @app.exception_handler(HTTPException) + async def http_exception_handler(request: Request, e: HTTPException): + return handle_exception(request, e) + + cors_options = { + "allow_methods": ["*"], + "allow_headers": ["*"], + "allow_origins": ["*"], + "allow_credentials": True, + } + app.add_middleware(CORSMiddleware, **cors_options) + + +global_sio: AsyncServer = None + + +def diffuser_callback(pipe, step: int, timestep: int, callback_kwargs: Dict = {}): + # self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict + # logger.info(f"diffusion callback: step={step}, timestep={timestep}") + + # We use asyncio loos for task processing. Perhaps in the future, we can add a processing queue similar to InvokeAI, + # but for now let's just start a separate event loop. 
It shouldn't make a difference for single person use + asyncio.run(global_sio.emit("diffusion_progress", {"step": step})) + return {} + + +class Api: + def __init__(self, app: FastAPI, config: ApiConfig): + self.app = app + self.config = config + self.router = APIRouter() + self.queue_lock = threading.Lock() + api_middleware(self.app) + + self.file_manager = self._build_file_manager() + self.plugins = self._build_plugins() + self.model_manager = self._build_model_manager() + + # fmt: off + self.add_api_route("/api/v1/gen-info", self.api_geninfo, methods=["POST"], response_model=GenInfoResponse) + self.add_api_route("/api/v1/server-config", self.api_server_config, methods=["GET"], response_model=ServerConfigResponse) + self.add_api_route("/api/v1/models", self.api_models, methods=["GET"], response_model=List[ModelInfo]) + self.add_api_route("/api/v1/model", self.api_current_model, methods=["GET"], response_model=ModelInfo) + self.add_api_route("/api/v1/model", self.api_switch_model, methods=["POST"], response_model=ModelInfo) + self.add_api_route("/api/v1/inputimage", self.api_input_image, methods=["GET"]) + self.add_api_route("/api/v1/inpaint", self.api_inpaint, methods=["POST"]) + self.add_api_route("/api/v1/run_plugin_gen_mask", self.api_run_plugin_gen_mask, methods=["POST"]) + self.add_api_route("/api/v1/run_plugin_gen_image", self.api_run_plugin_gen_image, methods=["POST"]) + self.add_api_route("/api/v1/samplers", self.api_samplers, methods=["GET"]) + self.add_api_route("/api/v1/adjust_mask", self.api_adjust_mask, methods=["POST"]) + self.app.mount("/", StaticFiles(directory=WEB_APP_DIR, html=True), name="assets") + # fmt: on + + global global_sio + self.sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*") + self.combined_asgi_app = socketio.ASGIApp(self.sio, self.app) + self.app.mount("/ws", self.combined_asgi_app) + global_sio = self.sio + + def add_api_route(self, path: str, endpoint, **kwargs): + return self.app.add_api_route(path, endpoint, **kwargs) + + def api_models(self) -> List[ModelInfo]: + return self.model_manager.scan_models() + + def api_current_model(self) -> ModelInfo: + return self.model_manager.current_model + + def api_switch_model(self, req: SwitchModelRequest) -> ModelInfo: + if req.name == self.model_manager.name: + return self.model_manager.current_model + self.model_manager.switch(req.name) + return self.model_manager.current_model + + def api_server_config(self) -> ServerConfigResponse: + return ServerConfigResponse( + plugins=[ + PluginInfo( + name=it.name, + support_gen_image=it.support_gen_image, + support_gen_mask=it.support_gen_mask, + ) + for it in self.plugins.values() + ], + enableFileManager=self.file_manager is not None, + enableAutoSaving=self.config.output_dir is not None, + enableControlnet=self.model_manager.enable_controlnet, + controlnetMethod=self.model_manager.controlnet_method, + disableModelSwitch=False, + isDesktop=False, + samplers=self.api_samplers(), + ) + + def api_input_image(self) -> FileResponse: + if self.config.input and self.config.input.is_file(): + return FileResponse(self.config.input) + raise HTTPException(status_code=404, detail="Input image not found") + + def api_geninfo(self, file: UploadFile) -> GenInfoResponse: + _, _, info = load_img(file.file.read(), return_info=True) + parts = info.get("parameters", "").split("Negative prompt: ") + prompt = parts[0].strip() + negative_prompt = "" + if len(parts) > 1: + negative_prompt = parts[1].split("\n")[0].strip() + return GenInfoResponse(prompt=prompt, 
negative_prompt=negative_prompt) + + def api_inpaint(self, req: InpaintRequest): + image, alpha_channel, infos = decode_base64_to_image(req.image) + mask, _, _ = decode_base64_to_image(req.mask, gray=True) + + mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1] + if image.shape[:2] != mask.shape[:2]: + raise HTTPException( + 400, + detail=f"Image size({image.shape[:2]}) and mask size({mask.shape[:2]}) not match.", + ) + + if req.paint_by_example_example_image: + paint_by_example_image, _, _ = decode_base64_to_image( + req.paint_by_example_example_image + ) + + start = time.time() + rgb_np_img = self.model_manager(image, mask, req) + logger.info(f"process time: {(time.time() - start) * 1000:.2f}ms") + torch_gc() + + rgb_np_img = cv2.cvtColor(rgb_np_img.astype(np.uint8), cv2.COLOR_BGR2RGB) + rgb_res = concat_alpha_channel(rgb_np_img, alpha_channel) + + ext = "png" + res_img_bytes = pil_to_bytes( + Image.fromarray(rgb_res), + ext=ext, + quality=self.config.quality, + infos=infos, + ) + + asyncio.run(self.sio.emit("diffusion_finish")) + + return Response( + content=res_img_bytes, + media_type=f"image/{ext}", + headers={"X-Seed": str(req.sd_seed)}, + ) + + def api_run_plugin_gen_image(self, req: RunPluginRequest): + ext = "png" + if req.name not in self.plugins: + raise HTTPException(status_code=422, detail="Plugin not found") + if not self.plugins[req.name].support_gen_image: + raise HTTPException( + status_code=422, detail="Plugin does not support output image" + ) + rgb_np_img, alpha_channel, infos = decode_base64_to_image(req.image) + bgr_or_rgba_np_img = self.plugins[req.name].gen_image(rgb_np_img, req) + torch_gc() + + if bgr_or_rgba_np_img.shape[2] == 4: + rgba_np_img = bgr_or_rgba_np_img + else: + rgba_np_img = cv2.cvtColor(bgr_or_rgba_np_img, cv2.COLOR_BGR2RGB) + rgba_np_img = concat_alpha_channel(rgba_np_img, alpha_channel) + + return Response( + content=pil_to_bytes( + Image.fromarray(rgba_np_img), + ext=ext, + quality=self.config.quality, + infos=infos, + ), + media_type=f"image/{ext}", + ) + + def api_run_plugin_gen_mask(self, req: RunPluginRequest): + if req.name not in self.plugins: + raise HTTPException(status_code=422, detail="Plugin not found") + if not self.plugins[req.name].support_gen_mask: + raise HTTPException( + status_code=422, detail="Plugin does not support output image" + ) + rgb_np_img, alpha_channel, infos = decode_base64_to_image(req.image) + bgr_or_gray_mask = self.plugins[req.name].gen_mask(rgb_np_img, req) + torch_gc() + res_mask = gen_frontend_mask(bgr_or_gray_mask) + return Response( + content=numpy_to_bytes(res_mask, "png"), + media_type="image/png", + ) + + def api_samplers(self) -> List[str]: + return [member.value for member in SDSampler.__members__.values()] + + def api_adjust_mask(self, req: AdjustMaskRequest): + mask, _, _ = decode_base64_to_image(req.mask, gray=True) + mask = adjust_mask(mask, req.kernel_size, req.operate) + return Response(content=numpy_to_bytes(mask, "png"), media_type="image/png") + + def launch(self): + self.app.include_router(self.router) + uvicorn.run( + self.combined_asgi_app, + host=self.config.host, + port=self.config.port, + timeout_keep_alive=999999999, + ) + + def _build_file_manager(self) -> Optional[FileManager]: + if self.config.input and self.config.input.is_dir(): + logger.info( + f"Input is directory, initialize file manager {self.config.input}" + ) + + return FileManager( + app=self.app, + input_dir=self.config.input, + output_dir=self.config.output_dir, + ) + return None + + def _build_plugins(self) -> 
Dict[str, BasePlugin]: + return build_plugins( + self.config.enable_interactive_seg, + self.config.interactive_seg_model, + self.config.interactive_seg_device, + self.config.enable_remove_bg, + self.config.enable_anime_seg, + self.config.enable_realesrgan, + self.config.realesrgan_device, + self.config.realesrgan_model, + self.config.enable_gfpgan, + self.config.gfpgan_device, + self.config.enable_restoreformer, + self.config.restoreformer_device, + self.config.no_half, + ) + + def _build_model_manager(self): + return ModelManager( + name=self.config.model, + device=torch.device(self.config.device), + no_half=self.config.no_half, + low_mem=self.config.low_mem, + disable_nsfw=self.config.disable_nsfw_checker, + sd_cpu_textencoder=self.config.cpu_textencoder, + local_files_only=self.config.local_files_only, + cpu_offload=self.config.cpu_offload, + callback=diffuser_callback, + ) + + +if __name__ == "__main__": + from iopaint.schema import InteractiveSegModel, RealESRGANModel + + app = FastAPI() + api = Api( + app, + ApiConfig( + host="127.0.0.1", + port=8080, + model="lama", + no_half=False, + cpu_offload=False, + disable_nsfw_checker=False, + cpu_textencoder=False, + device="cpu", + input="/Users/cwq/code/github/MI-GAN/examples/places2_512_object/images", + output_dir="/Users/cwq/code/github/lama-cleaner/tmp", + quality=100, + enable_interactive_seg=False, + interactive_seg_model=InteractiveSegModel.vit_b, + interactive_seg_device="cpu", + enable_remove_bg=False, + enable_anime_seg=False, + enable_realesrgan=False, + realesrgan_device="cpu", + realesrgan_model=RealESRGANModel.realesr_general_x4v3, + enable_gfpgan=False, + gfpgan_device="cpu", + enable_restoreformer=False, + restoreformer_device="cpu", + ), + ) + api.launch() diff --git a/iopaint/batch_processing.py b/iopaint/batch_processing.py new file mode 100644 index 0000000..393a720 --- /dev/null +++ b/iopaint/batch_processing.py @@ -0,0 +1,127 @@ +import json +from pathlib import Path +from typing import Dict, Optional + +import cv2 +import psutil +from PIL import Image +from loguru import logger +from rich.console import Console +from rich.progress import ( + Progress, + SpinnerColumn, + TimeElapsedColumn, + MofNCompleteColumn, + TextColumn, + BarColumn, + TaskProgressColumn, +) + +from iopaint.helper import pil_to_bytes +from iopaint.model.utils import torch_gc +from iopaint.model_manager import ModelManager +from iopaint.schema import InpaintRequest + + +def glob_images(path: Path) -> Dict[str, Path]: + # png/jpg/jpeg + if path.is_file(): + return {path.stem: path} + elif path.is_dir(): + res = {} + for it in path.glob("*.*"): + if it.suffix.lower() in [".png", ".jpg", ".jpeg"]: + res[it.stem] = it + return res + + +def batch_inpaint( + model: str, + device, + image: Path, + mask: Path, + output: Path, + config: Optional[Path] = None, + concat: bool = False, +): + if image.is_dir() and output.is_file(): + logger.error( + f"invalid --output: when image is a directory, output should be a directory" + ) + exit(-1) + output.mkdir(parents=True, exist_ok=True) + + image_paths = glob_images(image) + mask_paths = glob_images(mask) + if len(image_paths) == 0: + logger.error(f"invalid --image: empty image folder") + exit(-1) + if len(mask_paths) == 0: + logger.error(f"invalid --mask: empty mask folder") + exit(-1) + + if config is None: + inpaint_request = InpaintRequest() + logger.info(f"Using default config: {inpaint_request}") + else: + with open(config, "r", encoding="utf-8") as f: + inpaint_request = InpaintRequest(**json.load(f)) + + 
model_manager = ModelManager(name=model, device=device) + first_mask = list(mask_paths.values())[0] + + console = Console() + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TaskProgressColumn(), + MofNCompleteColumn(), + TimeElapsedColumn(), + console=console, + transient=False, + ) as progress: + task = progress.add_task("Batch processing...", total=len(image_paths)) + for stem, image_p in image_paths.items(): + if stem not in mask_paths and mask.is_dir(): + progress.log(f"mask for {image_p} not found") + progress.update(task, advance=1) + continue + mask_p = mask_paths.get(stem, first_mask) + + infos = Image.open(image_p).info + + img = cv2.imread(str(image_p)) + img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB) + mask_img = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE) + if mask_img.shape[:2] != img.shape[:2]: + progress.log( + f"resize mask {mask_p.name} to image {image_p.name} size: {img.shape[:2]}" + ) + mask_img = cv2.resize( + mask_img, + (img.shape[1], img.shape[0]), + interpolation=cv2.INTER_NEAREST, + ) + mask_img[mask_img >= 127] = 255 + mask_img[mask_img < 127] = 0 + + # bgr + inpaint_result = model_manager(img, mask_img, inpaint_request) + inpaint_result = cv2.cvtColor(inpaint_result, cv2.COLOR_BGR2RGB) + if concat: + mask_img = cv2.cvtColor(mask_img, cv2.COLOR_GRAY2RGB) + inpaint_result = cv2.hconcat([img, mask_img, inpaint_result]) + + img_bytes = pil_to_bytes(Image.fromarray(inpaint_result), "png", 100, infos) + save_p = output / f"{stem}.png" + with open(save_p, "wb") as fw: + fw.write(img_bytes) + + progress.update(task, advance=1) + torch_gc() + # pid = psutil.Process().pid + # memory_info = psutil.Process(pid).memory_info() + # memory_in_mb = memory_info.rss / (1024 * 1024) + # print(f"原图大小:{img.shape},当前进程的内存占用:{memory_in_mb}MB") diff --git a/lama_cleaner/benchmark.py b/iopaint/benchmark.py similarity index 89% rename from lama_cleaner/benchmark.py rename to iopaint/benchmark.py index a0a170e..0205c60 100644 --- a/lama_cleaner/benchmark.py +++ b/iopaint/benchmark.py @@ -9,8 +9,8 @@ import nvidia_smi import psutil import torch -from lama_cleaner.model_manager import ModelManager -from lama_cleaner.schema import Config, HDStrategy, SDSampler +from iopaint.model_manager import ModelManager +from iopaint.schema import InpaintRequest, HDStrategy, SDSampler try: torch._C._jit_override_can_fuse_on_cpu(False) @@ -36,7 +36,7 @@ def run_model(model, size): image = np.random.randint(0, 256, (size[0], size[1], 3)).astype(np.uint8) mask = np.random.randint(0, 255, size).astype(np.uint8) - config = Config( + config = InpaintRequest( ldm_steps=2, hd_strategy=HDStrategy.ORIGINAL, hd_strategy_crop_margin=128, @@ -44,7 +44,7 @@ def run_model(model, size): hd_strategy_resize_limit=128, prompt="a fox is sitting on a bench", sd_steps=5, - sd_sampler=SDSampler.ddim + sd_sampler=SDSampler.ddim, ) model(image, mask, config) @@ -75,7 +75,9 @@ def benchmark(model, times: int, empty_cache: bool): # cpu_metrics.append(process.cpu_percent()) time_metrics.append((time.time() - start) * 1000) memory_metrics.append(process.memory_info().rss / 1024 / 1024) - gpu_memory_metrics.append(nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024) + gpu_memory_metrics.append( + nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024 + ) print(f"size: {size}".center(80, "-")) # print(f"cpu: {format(cpu_metrics)}") @@ -101,9 +103,7 @@ if __name__ == "__main__": model = ModelManager( name=args.name, device=device, - sd_run_local=True, 
disable_nsfw=True, sd_cpu_textencoder=True, - hf_access_token="123" ) benchmark(model, args.times, args.empty_cache) diff --git a/iopaint/cli.py b/iopaint/cli.py new file mode 100644 index 0000000..20f6ea7 --- /dev/null +++ b/iopaint/cli.py @@ -0,0 +1,207 @@ +from pathlib import Path +from typing import Dict, Optional + +import typer +from fastapi import FastAPI +from loguru import logger +from typer import Option +from typer_config import use_json_config + +from iopaint.const import * +from iopaint.runtime import setup_model_dir, dump_environment_info, check_device +from iopaint.schema import InteractiveSegModel, Device, RealESRGANModel + +typer_app = typer.Typer(pretty_exceptions_show_locals=False, add_completion=False) + + +@typer_app.command(help="Install all plugins dependencies") +def install_plugins_packages(): + from iopaint.installer import install_plugins_package + + install_plugins_package() + + +@typer_app.command(help="Download SD/SDXL normal/inpainting model from HuggingFace") +def download( + model: str = Option( + ..., help="Model id on HuggingFace e.g: runwayml/stable-diffusion-inpainting" + ), + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + file_okay=False, + callback=setup_model_dir, + ), +): + from iopaint.download import cli_download_model + + cli_download_model(model) + + +@typer_app.command(name="list", help="List downloaded models") +def list_model( + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + file_okay=False, + callback=setup_model_dir, + ), +): + from iopaint.download import scan_models + + scanned_models = scan_models() + for it in scanned_models: + print(it.name) + + +@typer_app.command(help="Batch processing images") +def run( + model: str = Option("lama"), + device: Device = Option(Device.cpu), + image: Path = Option(..., help="Image folders or file path"), + mask: Path = Option( + ..., + help="Mask folders or file path. " + "If it is a directory, the mask images in the directory should have the same name as the original image." + "If it is a file, all images will use this mask." + "Mask will automatically resize to the same size as the original image.", + ), + output: Path = Option(..., help="Output directory or file path"), + config: Path = Option( + None, help="Config file path. You can use dump command to create a base config." 
+ ), + concat: bool = Option( + False, help="Concat original image, mask and output images into one image" + ), + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + file_okay=False, + callback=setup_model_dir, + ), +): + from iopaint.download import cli_download_model, scan_models + + scanned_models = scan_models() + if model not in [it.name for it in scanned_models]: + logger.info(f"{model} not found in {model_dir}, try to downloading") + cli_download_model(model) + + from iopaint.batch_processing import batch_inpaint + + batch_inpaint(model, device, image, mask, output, config, concat) + + +@typer_app.command(help="Start IOPaint server") +@use_json_config() +def start( + host: str = Option("127.0.0.1"), + port: int = Option(8080), + model: str = Option( + DEFAULT_MODEL, + help=f"Erase models: [{', '.join(AVAILABLE_MODELS)}].\n" + f"Diffusion models: [{', '.join(DIFFUSION_MODELS)}] or any SD/SDXL normal/inpainting models on HuggingFace.", + ), + model_dir: Path = Option( + DEFAULT_MODEL_DIR, + help=MODEL_DIR_HELP, + dir_okay=True, + file_okay=False, + callback=setup_model_dir, + ), + low_mem: bool = Option(False, help=LOW_MEM_HELP), + no_half: bool = Option(False, help=NO_HALF_HELP), + cpu_offload: bool = Option(False, help=CPU_OFFLOAD_HELP), + disable_nsfw_checker: bool = Option(False, help=DISABLE_NSFW_HELP), + cpu_textencoder: bool = Option(False, help=CPU_TEXTENCODER_HELP), + local_files_only: bool = Option(False, help=LOCAL_FILES_ONLY_HELP), + device: Device = Option(Device.cpu), + input: Optional[Path] = Option(None, help=INPUT_HELP), + output_dir: Optional[Path] = Option( + None, help=OUTPUT_DIR_HELP, dir_okay=True, file_okay=False + ), + quality: int = Option(95, help=QUALITY_HELP), + enable_interactive_seg: bool = Option(False, help=INTERACTIVE_SEG_HELP), + interactive_seg_model: InteractiveSegModel = Option( + InteractiveSegModel.vit_b, help=INTERACTIVE_SEG_MODEL_HELP + ), + interactive_seg_device: Device = Option(Device.cpu), + enable_remove_bg: bool = Option(False, help=REMOVE_BG_HELP), + enable_anime_seg: bool = Option(False, help=ANIMESEG_HELP), + enable_realesrgan: bool = Option(False), + realesrgan_device: Device = Option(Device.cpu), + realesrgan_model: RealESRGANModel = Option(RealESRGANModel.realesr_general_x4v3), + enable_gfpgan: bool = Option(False), + gfpgan_device: Device = Option(Device.cpu), + enable_restoreformer: bool = Option(False), + restoreformer_device: Device = Option(Device.cpu), +): + dump_environment_info() + device = check_device(device) + if input and not input.exists(): + logger.error(f"invalid --input: {input} not exists") + exit() + if output_dir: + output_dir = output_dir.expanduser().absolute() + logger.info(f"Image will be saved to {output_dir}") + if not output_dir.exists(): + logger.info(f"Create output directory {output_dir}") + output_dir.mkdir(parents=True) + + model_dir = model_dir.expanduser().absolute() + + if local_files_only: + os.environ["TRANSFORMERS_OFFLINE"] = "1" + os.environ["HF_HUB_OFFLINE"] = "1" + + from iopaint.download import cli_download_model, scan_models + + scanned_models = scan_models() + if model not in [it.name for it in scanned_models]: + logger.info(f"{model} not found in {model_dir}, try to downloading") + cli_download_model(model) + + from iopaint.api import Api + from iopaint.schema import ApiConfig + + app = FastAPI() + api_config = ApiConfig( + host=host, + port=port, + model=model, + no_half=no_half, + low_mem=low_mem, + cpu_offload=cpu_offload, + disable_nsfw_checker=disable_nsfw_checker, 
+ local_files_only=local_files_only, + cpu_textencoder=cpu_textencoder if device == Device.cuda else False, + device=device, + input=input, + output_dir=output_dir, + quality=quality, + enable_interactive_seg=enable_interactive_seg, + interactive_seg_model=interactive_seg_model, + interactive_seg_device=interactive_seg_device, + enable_remove_bg=enable_remove_bg, + enable_anime_seg=enable_anime_seg, + enable_realesrgan=enable_realesrgan, + realesrgan_device=realesrgan_device, + realesrgan_model=realesrgan_model, + enable_gfpgan=enable_gfpgan, + gfpgan_device=gfpgan_device, + enable_restoreformer=enable_restoreformer, + restoreformer_device=restoreformer_device, + ) + print(api_config.model_dump_json(indent=4)) + api = Api(app, api_config) + api.launch() + + +@typer_app.command(help="Start IOPaint web config page") +def start_web_config( + config_file: Path = Option("config.json"), +): + dump_environment_info() + from iopaint.web_config import main + + main(config_file) diff --git a/iopaint/const.py b/iopaint/const.py new file mode 100644 index 0000000..5cdd5d7 --- /dev/null +++ b/iopaint/const.py @@ -0,0 +1,150 @@ +import json +import os +from pathlib import Path + +from iopaint.schema import ApiConfig, Device, InteractiveSegModel, RealESRGANModel + +INSTRUCT_PIX2PIX_NAME = "timbrooks/instruct-pix2pix" +KANDINSKY22_NAME = "kandinsky-community/kandinsky-2-2-decoder-inpaint" +POWERPAINT_NAME = "Sanster/PowerPaint-V1-stable-diffusion-inpainting" +ANYTEXT_NAME = "Sanster/AnyText" + + +DIFFUSERS_SD_CLASS_NAME = "StableDiffusionPipeline" +DIFFUSERS_SD_INPAINT_CLASS_NAME = "StableDiffusionInpaintPipeline" +DIFFUSERS_SDXL_CLASS_NAME = "StableDiffusionXLPipeline" +DIFFUSERS_SDXL_INPAINT_CLASS_NAME = "StableDiffusionXLInpaintPipeline" + +MPS_UNSUPPORT_MODELS = [ + "lama", + "ldm", + "zits", + "mat", + "fcf", + "cv2", + "manga", +] + +DEFAULT_MODEL = "lama" +AVAILABLE_MODELS = ["lama", "ldm", "zits", "mat", "fcf", "manga", "cv2", "migan"] +DIFFUSION_MODELS = [ + "runwayml/stable-diffusion-inpainting", + "Uminosachi/realisticVisionV51_v51VAE-inpainting", + "redstonehero/dreamshaper-inpainting", + "Sanster/anything-4.0-inpainting", + "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "Fantasy-Studio/Paint-by-Example", + POWERPAINT_NAME, + ANYTEXT_NAME, +] + +NO_HALF_HELP = """ +Using full precision(fp32) model. +If your diffusion model generate result is always black or green, use this argument. +""" + +CPU_OFFLOAD_HELP = """ +Offloads diffusion model's weight to CPU RAM, significantly reducing vRAM usage. +""" + +LOW_MEM_HELP = "Enable attention slicing and vae tiling to save memory." + +DISABLE_NSFW_HELP = """ +Disable NSFW checker for diffusion model. +""" + +CPU_TEXTENCODER_HELP = """ +Run diffusion models text encoder on CPU to reduce vRAM usage. 
+""" + +SD_CONTROLNET_CHOICES = [ + "lllyasviel/control_v11p_sd15_canny", + # "lllyasviel/control_v11p_sd15_seg", + "lllyasviel/control_v11p_sd15_openpose", + "lllyasviel/control_v11p_sd15_inpaint", + "lllyasviel/control_v11f1p_sd15_depth", +] + +SD2_CONTROLNET_CHOICES = [ + "thibaud/controlnet-sd21-canny-diffusers", + "thibaud/controlnet-sd21-depth-diffusers", + "thibaud/controlnet-sd21-openpose-diffusers", +] + +SDXL_CONTROLNET_CHOICES = [ + "thibaud/controlnet-openpose-sdxl-1.0", + "destitech/controlnet-inpaint-dreamer-sdxl", + "diffusers/controlnet-canny-sdxl-1.0", + "diffusers/controlnet-canny-sdxl-1.0-mid", + "diffusers/controlnet-canny-sdxl-1.0-small", + "diffusers/controlnet-depth-sdxl-1.0", + "diffusers/controlnet-depth-sdxl-1.0-mid", + "diffusers/controlnet-depth-sdxl-1.0-small", +] + +LOCAL_FILES_ONLY_HELP = """ +When loading diffusion models, using local files only, not connect to HuggingFace server. +""" + +DEFAULT_MODEL_DIR = os.path.abspath( + os.getenv("XDG_CACHE_HOME", os.path.join(os.path.expanduser("~"), ".cache")) +) + +MODEL_DIR_HELP = f""" +Model download directory (by setting XDG_CACHE_HOME environment variable), by default model download to {DEFAULT_MODEL_DIR} +""" + +OUTPUT_DIR_HELP = """ +Result images will be saved to output directory automatically. +""" + +INPUT_HELP = """ +If input is image, it will be loaded by default. +If input is directory, you can browse and select image in file manager. +""" + +GUI_HELP = """ +Launch Lama Cleaner as desktop app +""" + +QUALITY_HELP = """ +Quality of image encoding, 0-100. Default is 95, higher quality will generate larger file size. +""" + +INTERACTIVE_SEG_HELP = "Enable interactive segmentation using Segment Anything." +INTERACTIVE_SEG_MODEL_HELP = "Model size: mobile_sam < vit_b < vit_l < vit_h. Bigger model size means better segmentation but slower speed." +REMOVE_BG_HELP = "Enable remove background. Always run on CPU" +ANIMESEG_HELP = "Enable anime segmentation. Always run on CPU" +REALESRGAN_HELP = "Enable realesrgan super resolution" +GFPGAN_HELP = "Enable GFPGAN face restore. To also enhance background, use with --enable-realesrgan" +RESTOREFORMER_HELP = "Enable RestoreFormer face restore. To also enhance background, use with --enable-realesrgan" +GIF_HELP = "Enable GIF plugin. 
Make GIF to compare original and cleaned image" + +default_configs = dict( + host="127.0.0.1", + port=8080, + model=DEFAULT_MODEL, + model_dir=DEFAULT_MODEL_DIR, + no_half=False, + low_mem=False, + cpu_offload=False, + disable_nsfw_checker=False, + local_files_only=False, + cpu_textencoder=False, + device=Device.cuda, + input=None, + output_dir=None, + quality=95, + enable_interactive_seg=False, + interactive_seg_model=InteractiveSegModel.vit_b, + interactive_seg_device=Device.cpu, + enable_remove_bg=False, + enable_anime_seg=False, + enable_realesrgan=False, + realesrgan_device=Device.cpu, + realesrgan_model=RealESRGANModel.realesr_general_x4v3, + enable_gfpgan=False, + gfpgan_device=Device.cpu, + enable_restoreformer=False, + restoreformer_device=Device.cpu, +) diff --git a/iopaint/download.py b/iopaint/download.py new file mode 100644 index 0000000..b603804 --- /dev/null +++ b/iopaint/download.py @@ -0,0 +1,240 @@ +import json +import os +from functools import lru_cache +from typing import List + +from loguru import logger +from pathlib import Path + +from iopaint.const import ( + DEFAULT_MODEL_DIR, + DIFFUSERS_SD_CLASS_NAME, + DIFFUSERS_SD_INPAINT_CLASS_NAME, + DIFFUSERS_SDXL_CLASS_NAME, + DIFFUSERS_SDXL_INPAINT_CLASS_NAME, + ANYTEXT_NAME, +) +from iopaint.model.original_sd_configs import get_config_files +from iopaint.model_info import ModelInfo, ModelType + + +def cli_download_model(model: str): + from iopaint.model import models + from iopaint.model.utils import handle_from_pretrained_exceptions + + if model in models and models[model].is_erase_model: + logger.info(f"Downloading {model}...") + models[model].download() + logger.info(f"Done.") + elif model == ANYTEXT_NAME: + logger.info(f"Downloading {model}...") + models[model].download() + logger.info(f"Done.") + else: + logger.info(f"Downloading model from Huggingface: {model}") + from diffusers import DiffusionPipeline + + downloaded_path = handle_from_pretrained_exceptions( + DiffusionPipeline.download, + pretrained_model_name=model, + variant="fp16", + resume_download=True, + ) + logger.info(f"Done. 
Downloaded to {downloaded_path}") + + +def folder_name_to_show_name(name: str) -> str: + return name.replace("models--", "").replace("--", "/") + + +@lru_cache(maxsize=512) +def get_sd_model_type(model_abs_path: str) -> ModelType: + if "inpaint" in Path(model_abs_path).name.lower(): + model_type = ModelType.DIFFUSERS_SD_INPAINT + else: + # load once to check num_in_channels + from diffusers import StableDiffusionInpaintPipeline + + try: + StableDiffusionInpaintPipeline.from_single_file( + model_abs_path, + load_safety_checker=False, + local_files_only=True, + num_in_channels=9, + config_files=get_config_files(), + ) + model_type = ModelType.DIFFUSERS_SD_INPAINT + except ValueError as e: + if "Trying to set a tensor of shape torch.Size([320, 4, 3, 3])" in str(e): + model_type = ModelType.DIFFUSERS_SD + else: + raise e + return model_type + + +@lru_cache() +def get_sdxl_model_type(model_abs_path: str) -> ModelType: + if "inpaint" in model_abs_path: + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + else: + # load once to check num_in_channels + from diffusers import StableDiffusionXLInpaintPipeline + + try: + model = StableDiffusionXLInpaintPipeline.from_single_file( + model_abs_path, + load_safety_checker=False, + local_files_only=True, + num_in_channels=9, + config_files=get_config_files(), + ) + if model.unet.config.in_channels == 9: + # https://github.com/huggingface/diffusers/issues/6610 + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + else: + model_type = ModelType.DIFFUSERS_SDXL + except ValueError as e: + if "Trying to set a tensor of shape torch.Size([320, 4, 3, 3])" in str(e): + model_type = ModelType.DIFFUSERS_SDXL + else: + raise e + return model_type + + +def scan_single_file_diffusion_models(cache_dir) -> List[ModelInfo]: + cache_dir = Path(cache_dir) + stable_diffusion_dir = cache_dir / "stable_diffusion" + cache_file = stable_diffusion_dir / "iopaint_cache.json" + model_type_cache = {} + if cache_file.exists(): + try: + with open(cache_file, "r", encoding="utf-8") as f: + model_type_cache = json.load(f) + assert isinstance(model_type_cache, dict) + except: + pass + + res = [] + for it in stable_diffusion_dir.glob(f"*.*"): + if it.suffix not in [".safetensors", ".ckpt"]: + continue + model_abs_path = str(it.absolute()) + model_type = model_type_cache.get(it.name) + if model_type is None: + model_type = get_sd_model_type(model_abs_path) + model_type_cache[it.name] = model_type + res.append( + ModelInfo( + name=it.name, + path=model_abs_path, + model_type=model_type, + is_single_file_diffusers=True, + ) + ) + if stable_diffusion_dir.exists(): + with open(cache_file, "w", encoding="utf-8") as fw: + json.dump(model_type_cache, fw, indent=2, ensure_ascii=False) + + stable_diffusion_xl_dir = cache_dir / "stable_diffusion_xl" + sdxl_cache_file = stable_diffusion_xl_dir / "iopaint_cache.json" + sdxl_model_type_cache = {} + if sdxl_cache_file.exists(): + try: + with open(sdxl_cache_file, "r", encoding="utf-8") as f: + sdxl_model_type_cache = json.load(f) + assert isinstance(sdxl_model_type_cache, dict) + except: + pass + + for it in stable_diffusion_xl_dir.glob(f"*.*"): + if it.suffix not in [".safetensors", ".ckpt"]: + continue + model_abs_path = str(it.absolute()) + model_type = sdxl_model_type_cache.get(it.name) + if model_type is None: + model_type = get_sdxl_model_type(model_abs_path) + sdxl_model_type_cache[it.name] = model_type + if stable_diffusion_xl_dir.exists(): + with open(sdxl_cache_file, "w", encoding="utf-8") as fw: + json.dump(sdxl_model_type_cache, fw, indent=2, 
ensure_ascii=False) + + res.append( + ModelInfo( + name=it.name, + path=model_abs_path, + model_type=model_type, + is_single_file_diffusers=True, + ) + ) + return res + + +def scan_inpaint_models(model_dir: Path) -> List[ModelInfo]: + res = [] + from iopaint.model import models + + # logger.info(f"Scanning inpaint models in {model_dir}") + + for name, m in models.items(): + if m.is_erase_model and m.is_downloaded(): + res.append( + ModelInfo( + name=name, + path=name, + model_type=ModelType.INPAINT, + ) + ) + return res + + +def scan_models() -> List[ModelInfo]: + from huggingface_hub.constants import HF_HUB_CACHE + + model_dir = os.getenv("XDG_CACHE_HOME", DEFAULT_MODEL_DIR) + available_models = [] + available_models.extend(scan_inpaint_models(model_dir)) + available_models.extend(scan_single_file_diffusion_models(model_dir)) + cache_dir = Path(HF_HUB_CACHE) + # logger.info(f"Scanning diffusers models in {cache_dir}") + diffusers_model_names = [] + for it in cache_dir.glob("**/*/model_index.json"): + with open(it, "r", encoding="utf-8") as f: + try: + data = json.load(f) + except: + continue + + _class_name = data["_class_name"] + name = folder_name_to_show_name(it.parent.parent.parent.name) + if name in diffusers_model_names: + continue + if "PowerPaint" in name: + model_type = ModelType.DIFFUSERS_OTHER + elif _class_name == DIFFUSERS_SD_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SD + elif _class_name == DIFFUSERS_SD_INPAINT_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SD_INPAINT + elif _class_name == DIFFUSERS_SDXL_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SDXL + elif _class_name == DIFFUSERS_SDXL_INPAINT_CLASS_NAME: + model_type = ModelType.DIFFUSERS_SDXL_INPAINT + elif _class_name in [ + "StableDiffusionInstructPix2PixPipeline", + "PaintByExamplePipeline", + "KandinskyV22InpaintPipeline", + "AnyText", + ]: + model_type = ModelType.DIFFUSERS_OTHER + else: + continue + + diffusers_model_names.append(name) + available_models.append( + ModelInfo( + name=name, + path=name, + model_type=model_type, + ) + ) + + return available_models diff --git a/lama_cleaner/file_manager/__init__.py b/iopaint/file_manager/__init__.py similarity index 100% rename from lama_cleaner/file_manager/__init__.py rename to iopaint/file_manager/__init__.py diff --git a/iopaint/file_manager/file_manager.py b/iopaint/file_manager/file_manager.py new file mode 100644 index 0000000..cb33278 --- /dev/null +++ b/iopaint/file_manager/file_manager.py @@ -0,0 +1,222 @@ +import os +from io import BytesIO +from pathlib import Path +from typing import List + +from PIL import Image, ImageOps, PngImagePlugin +from fastapi import FastAPI, UploadFile, HTTPException +from starlette.responses import FileResponse + +from ..schema import MediasResponse, MediaTab + +LARGE_ENOUGH_NUMBER = 100 +PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2) +from .storage_backends import FilesystemStorageBackend +from .utils import aspect_to_string, generate_filename, glob_img + + +class FileManager: + def __init__(self, app: FastAPI, input_dir: Path, output_dir: Path): + self.app = app + self.input_dir: Path = input_dir + self.output_dir: Path = output_dir + + self.image_dir_filenames = [] + self.output_dir_filenames = [] + if not self.thumbnail_directory.exists(): + self.thumbnail_directory.mkdir(parents=True) + + # fmt: off + self.app.add_api_route("/api/v1/save_image", self.api_save_image, methods=["POST"]) + self.app.add_api_route("/api/v1/medias", self.api_medias, methods=["GET"], response_model=List[MediasResponse]) + 
self.app.add_api_route("/api/v1/media_file", self.api_media_file, methods=["GET"]) + self.app.add_api_route("/api/v1/media_thumbnail_file", self.api_media_thumbnail_file, methods=["GET"]) + # fmt: on + + def api_save_image(self, file: UploadFile): + filename = file.filename + origin_image_bytes = file.file.read() + with open(self.output_dir / filename, "wb") as fw: + fw.write(origin_image_bytes) + + def api_medias(self, tab: MediaTab) -> List[MediasResponse]: + img_dir = self._get_dir(tab) + return self._media_names(img_dir) + + def api_media_file(self, tab: MediaTab, filename: str) -> FileResponse: + file_path = self._get_file(tab, filename) + return FileResponse(file_path, media_type="image/png") + + # tab=${tab}?filename=${filename.name}?width=${width}&height=${height} + def api_media_thumbnail_file( + self, tab: MediaTab, filename: str, width: int, height: int + ) -> FileResponse: + img_dir = self._get_dir(tab) + thumb_filename, (width, height) = self.get_thumbnail( + img_dir, filename, width=width, height=height + ) + thumbnail_filepath = self.thumbnail_directory / thumb_filename + return FileResponse( + thumbnail_filepath, + headers={ + "X-Width": str(width), + "X-Height": str(height), + }, + media_type="image/jpeg", + ) + + def _get_dir(self, tab: MediaTab) -> Path: + if tab == "input": + return self.input_dir + elif tab == "output": + return self.output_dir + else: + raise HTTPException(status_code=422, detail=f"tab not found: {tab}") + + def _get_file(self, tab: MediaTab, filename: str) -> Path: + file_path = self._get_dir(tab) / filename + if not file_path.exists(): + raise HTTPException(status_code=422, detail=f"file not found: {file_path}") + return file_path + + @property + def thumbnail_directory(self) -> Path: + return self.output_dir / "thumbnails" + + @staticmethod + def _media_names(directory: Path) -> List[MediasResponse]: + names = sorted([it.name for it in glob_img(directory)]) + res = [] + for name in names: + path = os.path.join(directory, name) + img = Image.open(path) + res.append( + MediasResponse( + name=name, + height=img.height, + width=img.width, + ctime=os.path.getctime(path), + mtime=os.path.getmtime(path), + ) + ) + return res + + def get_thumbnail( + self, directory: Path, original_filename: str, width, height, **options + ): + directory = Path(directory) + storage = FilesystemStorageBackend(self.app) + crop = options.get("crop", "fit") + background = options.get("background") + quality = options.get("quality", 90) + + original_path, original_filename = os.path.split(original_filename) + original_filepath = os.path.join(directory, original_path, original_filename) + image = Image.open(BytesIO(storage.read(original_filepath))) + + # keep ratio resize + if not width and not height: + width = 256 + + if width != 0: + height = int(image.height * width / image.width) + else: + width = int(image.width * height / image.height) + + thumbnail_size = (width, height) + + thumbnail_filename = generate_filename( + directory, + original_filename, + aspect_to_string(thumbnail_size), + crop, + background, + quality, + ) + + thumbnail_filepath = os.path.join( + self.thumbnail_directory, original_path, thumbnail_filename + ) + + if storage.exists(thumbnail_filepath): + return thumbnail_filepath, (width, height) + + try: + image.load() + except (IOError, OSError): + self.app.logger.warning("Thumbnail not load image: %s", original_filepath) + return thumbnail_filepath, (width, height) + + # get original image format + options["format"] = options.get("format", image.format) + 
+ image = self._create_thumbnail( + image, thumbnail_size, crop, background=background + ) + + raw_data = self.get_raw_data(image, **options) + storage.save(thumbnail_filepath, raw_data) + + return thumbnail_filepath, (width, height) + + def get_raw_data(self, image, **options): + data = { + "format": self._get_format(image, **options), + "quality": options.get("quality", 90), + } + + _file = BytesIO() + image.save(_file, **data) + return _file.getvalue() + + @staticmethod + def colormode(image, colormode="RGB"): + if colormode == "RGB" or colormode == "RGBA": + if image.mode == "RGBA": + return image + if image.mode == "LA": + return image.convert("RGBA") + return image.convert(colormode) + + if colormode == "GRAY": + return image.convert("L") + + return image.convert(colormode) + + @staticmethod + def background(original_image, color=0xFF): + size = (max(original_image.size),) * 2 + image = Image.new("L", size, color) + image.paste( + original_image, + tuple(map(lambda x: (x[0] - x[1]) / 2, zip(size, original_image.size))), + ) + + return image + + def _get_format(self, image, **options): + if options.get("format"): + return options.get("format") + if image.format: + return image.format + + return "JPEG" + + def _create_thumbnail(self, image, size, crop="fit", background=None): + try: + resample = Image.Resampling.LANCZOS + except AttributeError: # pylint: disable=raise-missing-from + resample = Image.ANTIALIAS + + if crop == "fit": + image = ImageOps.fit(image, size, resample) + else: + image = image.copy() + image.thumbnail(size, resample=resample) + + if background is not None: + image = self.background(image) + + image = self.colormode(image) + + return image diff --git a/lama_cleaner/file_manager/storage_backends.py b/iopaint/file_manager/storage_backends.py similarity index 100% rename from lama_cleaner/file_manager/storage_backends.py rename to iopaint/file_manager/storage_backends.py diff --git a/lama_cleaner/file_manager/utils.py b/iopaint/file_manager/utils.py similarity index 80% rename from lama_cleaner/file_manager/utils.py rename to iopaint/file_manager/utils.py index 2a05671..f6890af 100644 --- a/lama_cleaner/file_manager/utils.py +++ b/iopaint/file_manager/utils.py @@ -1,19 +1,17 @@ # Copy from: https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/utils.py -import importlib -import os +import hashlib from pathlib import Path from typing import Union -def generate_filename(original_filename, *options): - name, ext = os.path.splitext(original_filename) +def generate_filename(directory: Path, original_filename, *options) -> str: + text = str(directory.absolute()) + original_filename for v in options: - if v: - name += "_%s" % v - name += ext - - return name + text += "%s" % v + md5_hash = hashlib.md5() + md5_hash.update(text.encode("utf-8")) + return md5_hash.hexdigest() + ".jpg" def parse_size(size): @@ -48,7 +46,7 @@ def aspect_to_string(size): return "x".join(map(str, size)) -IMG_SUFFIX = {'.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG'} +IMG_SUFFIX = {".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG"} def glob_img(p: Union[Path, str], recursive: bool = False): diff --git a/lama_cleaner/helper.py b/iopaint/helper.py similarity index 64% rename from lama_cleaner/helper.py rename to iopaint/helper.py index babbeac..9d08996 100644 --- a/lama_cleaner/helper.py +++ b/iopaint/helper.py @@ -1,14 +1,16 @@ +import base64 +import imghdr import io import os import sys -from typing import List, Optional +from typing import List, Optional, Dict, Tuple from 
urllib.parse import urlparse import cv2 from PIL import Image, ImageOps, PngImagePlugin import numpy as np import torch -from lama_cleaner.const import MPS_SUPPORT_MODELS +from iopaint.const import MPS_UNSUPPORT_MODELS from loguru import logger from torch.hub import download_url_to_file, get_dir import hashlib @@ -23,7 +25,7 @@ def md5sum(filename): def switch_mps_device(model_name, device): - if model_name not in MPS_SUPPORT_MODELS and str(device) == "mps": + if model_name in MPS_UNSUPPORT_MODELS and str(device) == "mps": logger.info(f"{model_name} not support mps, switch to cpu") return torch.device("cpu") return device @@ -54,12 +56,12 @@ def download_model(url, model_md5: str = None): try: os.remove(cached_file) logger.error( - f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner." + f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint." f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n" ) except: logger.error( - f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner." + f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart iopaint." ) exit(-1) @@ -78,12 +80,12 @@ def handle_error(model_path, model_md5, e): try: os.remove(model_path) logger.error( - f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner." + f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart iopaint." f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n" ) except: logger.error( - f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner." + f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart iopaint." 
) else: logger.error( @@ -135,31 +137,27 @@ def numpy_to_bytes(image_numpy: np.ndarray, ext: str) -> bytes: return image_bytes -def pil_to_bytes(pil_img, ext: str, quality: int = 95, exif_infos={}) -> bytes: +def pil_to_bytes(pil_img, ext: str, quality: int = 95, infos={}) -> bytes: with io.BytesIO() as output: - kwargs = {k: v for k, v in exif_infos.items() if v is not None} - if ext == "png" and "parameters" in kwargs: + kwargs = {k: v for k, v in infos.items() if v is not None} + if ext == "jpg": + ext = "jpeg" + if "png" == ext.lower() and "parameters" in kwargs: pnginfo_data = PngImagePlugin.PngInfo() pnginfo_data.add_text("parameters", kwargs["parameters"]) kwargs["pnginfo"] = pnginfo_data - pil_img.save( - output, - format=ext, - quality=quality, - **kwargs, - ) + pil_img.save(output, format=ext, quality=quality, **kwargs) image_bytes = output.getvalue() return image_bytes -def load_img(img_bytes, gray: bool = False, return_exif: bool = False): +def load_img(img_bytes, gray: bool = False, return_info: bool = False): alpha_channel = None image = Image.open(io.BytesIO(img_bytes)) - if return_exif: - info = image.info or {} - exif_infos = {"exif": image.getexif(), "parameters": info.get("parameters")} + if return_info: + infos = image.info try: image = ImageOps.exif_transpose(image) @@ -178,8 +176,8 @@ def load_img(img_bytes, gray: bool = False, return_exif: bool = False): image = image.convert("RGB") np_img = np.array(image) - if return_exif: - return np_img, alpha_channel, exif_infos + if return_info: + return np_img, alpha_channel, infos return np_img, alpha_channel @@ -290,3 +288,118 @@ def only_keep_largest_contour(mask: np.ndarray) -> List[np.ndarray]: return cv2.drawContours(new_mask, contours, max_index, 255, -1) else: return mask + + +def is_mac(): + return sys.platform == "darwin" + + +def get_image_ext(img_bytes): + w = imghdr.what("", img_bytes) + if w is None: + w = "jpeg" + return w + + +def decode_base64_to_image( + encoding: str, gray=False +) -> Tuple[np.array, Optional[np.array], Dict]: + if encoding.startswith("data:image/") or encoding.startswith( + "data:application/octet-stream;base64," + ): + encoding = encoding.split(";")[1].split(",")[1] + image = Image.open(io.BytesIO(base64.b64decode(encoding))) + + alpha_channel = None + try: + image = ImageOps.exif_transpose(image) + except: + pass + # exif_transpose will remove exif rotate info,we must call image.info after exif_transpose + infos = image.info + + if gray: + image = image.convert("L") + np_img = np.array(image) + else: + if image.mode == "RGBA": + np_img = np.array(image) + alpha_channel = np_img[:, :, -1] + np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB) + else: + image = image.convert("RGB") + np_img = np.array(image) + + return np_img, alpha_channel, infos + + +def encode_pil_to_base64(image: Image, quality: int, infos: Dict) -> bytes: + img_bytes = pil_to_bytes( + image, + "png", + quality=quality, + infos=infos, + ) + return base64.b64encode(img_bytes) + + +def concat_alpha_channel(rgb_np_img, alpha_channel) -> np.ndarray: + if alpha_channel is not None: + if alpha_channel.shape[:2] != rgb_np_img.shape[:2]: + alpha_channel = cv2.resize( + alpha_channel, dsize=(rgb_np_img.shape[1], rgb_np_img.shape[0]) + ) + rgb_np_img = np.concatenate( + (rgb_np_img, alpha_channel[:, :, np.newaxis]), axis=-1 + ) + return rgb_np_img + + +def adjust_mask(mask: np.ndarray, kernel_size: int, operate): + # fronted brush color "ffcc00bb" + # kernel_size = kernel_size*2+1 + mask[mask >= 127] = 255 + mask[mask < 127] = 0 + + 
if operate == "reverse": + mask = 255 - mask + else: + kernel = cv2.getStructuringElement( + cv2.MORPH_ELLIPSE, (2 * kernel_size + 1, 2 * kernel_size + 1) + ) + if operate == "expand": + mask = cv2.dilate( + mask, + kernel, + iterations=1, + ) + else: + mask = cv2.erode( + mask, + kernel, + iterations=1, + ) + res_mask = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8) + res_mask[mask > 128] = [255, 203, 0, int(255 * 0.73)] + res_mask = cv2.cvtColor(res_mask, cv2.COLOR_BGRA2RGBA) + return res_mask + + +def gen_frontend_mask(bgr_or_gray_mask): + if len(bgr_or_gray_mask.shape) == 3 and bgr_or_gray_mask.shape[2] != 1: + bgr_or_gray_mask = cv2.cvtColor(bgr_or_gray_mask, cv2.COLOR_BGR2GRAY) + + # fronted brush color "ffcc00bb" + # TODO: how to set kernel size? + kernel_size = 9 + bgr_or_gray_mask = cv2.dilate( + bgr_or_gray_mask, + np.ones((kernel_size, kernel_size), np.uint8), + iterations=1, + ) + res_mask = np.zeros( + (bgr_or_gray_mask.shape[0], bgr_or_gray_mask.shape[1], 4), dtype=np.uint8 + ) + res_mask[bgr_or_gray_mask > 128] = [255, 203, 0, int(255 * 0.73)] + res_mask = cv2.cvtColor(res_mask, cv2.COLOR_BGRA2RGBA) + return res_mask diff --git a/lama_cleaner/installer.py b/iopaint/installer.py similarity index 100% rename from lama_cleaner/installer.py rename to iopaint/installer.py diff --git a/iopaint/model/__init__.py b/iopaint/model/__init__.py new file mode 100644 index 0000000..799e2ec --- /dev/null +++ b/iopaint/model/__init__.py @@ -0,0 +1,37 @@ +from .anytext.anytext_model import AnyText +from .controlnet import ControlNet +from .fcf import FcF +from .instruct_pix2pix import InstructPix2Pix +from .kandinsky import Kandinsky22 +from .lama import LaMa +from .ldm import LDM +from .manga import Manga +from .mat import MAT +from .mi_gan import MIGAN +from .opencv2 import OpenCV2 +from .paint_by_example import PaintByExample +from .power_paint.power_paint import PowerPaint +from .sd import SD15, SD2, Anything4, RealisticVision14, SD +from .sdxl import SDXL +from .zits import ZITS + +models = { + LaMa.name: LaMa, + LDM.name: LDM, + ZITS.name: ZITS, + MAT.name: MAT, + FcF.name: FcF, + OpenCV2.name: OpenCV2, + Manga.name: Manga, + MIGAN.name: MIGAN, + SD15.name: SD15, + Anything4.name: Anything4, + RealisticVision14.name: RealisticVision14, + SD2.name: SD2, + PaintByExample.name: PaintByExample, + InstructPix2Pix.name: InstructPix2Pix, + Kandinsky22.name: Kandinsky22, + SDXL.name: SDXL, + PowerPaint.name: PowerPaint, + AnyText.name: AnyText, +} diff --git a/lama_cleaner/model/__init__.py b/iopaint/model/anytext/__init__.py similarity index 100% rename from lama_cleaner/model/__init__.py rename to iopaint/model/anytext/__init__.py diff --git a/iopaint/model/anytext/anytext_model.py b/iopaint/model/anytext/anytext_model.py new file mode 100644 index 0000000..374669e --- /dev/null +++ b/iopaint/model/anytext/anytext_model.py @@ -0,0 +1,73 @@ +import torch +from huggingface_hub import hf_hub_download + +from iopaint.const import ANYTEXT_NAME +from iopaint.model.anytext.anytext_pipeline import AnyTextPipeline +from iopaint.model.base import DiffusionInpaintModel +from iopaint.model.utils import get_torch_dtype, is_local_files_only +from iopaint.schema import InpaintRequest + + +class AnyText(DiffusionInpaintModel): + name = ANYTEXT_NAME + pad_mod = 64 + is_erase_model = False + + @staticmethod + def download(local_files_only=False): + hf_hub_download( + repo_id=ANYTEXT_NAME, + filename="model_index.json", + local_files_only=local_files_only, + ) + ckpt_path = hf_hub_download( + 
repo_id=ANYTEXT_NAME,
+            filename="pytorch_model.fp16.safetensors",
+            local_files_only=local_files_only,
+        )
+        font_path = hf_hub_download(
+            repo_id=ANYTEXT_NAME,
+            filename="SourceHanSansSC-Medium.otf",
+            local_files_only=local_files_only,
+        )
+        return ckpt_path, font_path
+
+    def init_model(self, device, **kwargs):
+        local_files_only = is_local_files_only(**kwargs)
+        ckpt_path, font_path = self.download(local_files_only)
+        use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
+        self.model = AnyTextPipeline(
+            ckpt_path=ckpt_path,
+            font_path=font_path,
+            device=device,
+            use_fp16=torch_dtype == torch.float16,
+        )
+        self.callback = kwargs.pop("callback", None)
+
+    def forward(self, image, mask, config: InpaintRequest):
+        """Input and output images have the same size.
+        image: [H, W, C] RGB
+        mask: [H, W, 1], 255 marks the area to inpaint
+        return: BGR IMAGE
+        """
+        height, width = image.shape[:2]
+        mask = mask.astype("float32") / 255.0
+        masked_image = image * (1 - mask)
+
+        # the pipeline returns a list of RGB ndarrays
+        results, rtn_code, rtn_warning = self.model(
+            image=image,
+            masked_image=masked_image,
+            prompt=config.prompt,
+            negative_prompt=config.negative_prompt,
+            num_inference_steps=config.sd_steps,
+            strength=config.sd_strength,
+            guidance_scale=config.sd_guidance_scale,
+            height=height,
+            width=width,
+            seed=config.sd_seed,
+            sort_priority="y",
+            callback=self.callback,
+        )
+        inpainted_rgb_image = results[0][..., ::-1]
+        return inpainted_rgb_image
diff --git a/iopaint/model/anytext/anytext_pipeline.py b/iopaint/model/anytext/anytext_pipeline.py
new file mode 100644
index 0000000..5051272
--- /dev/null
+++ b/iopaint/model/anytext/anytext_pipeline.py
@@ -0,0 +1,403 @@
+"""
+AnyText: Multilingual Visual Text Generation And Editing
+Paper: https://arxiv.org/abs/2311.03054
+Code: https://github.com/tyxsspa/AnyText
+Copyright (c) Alibaba, Inc. and its affiliates.
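+Adapted here for IOPaint's iopaint.model.anytext package; see the repository above for the original implementation.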
+""" +import os +from pathlib import Path + +from iopaint.model.utils import set_seed +from safetensors.torch import load_file + +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" +import torch +import re +import numpy as np +import cv2 +import einops +from PIL import ImageFont +from iopaint.model.anytext.cldm.model import create_model, load_state_dict +from iopaint.model.anytext.cldm.ddim_hacked import DDIMSampler +from iopaint.model.anytext.utils import ( + check_channels, + draw_glyph, + draw_glyph2, +) + + +BBOX_MAX_NUM = 8 +PLACE_HOLDER = "*" +max_chars = 20 + +ANYTEXT_CFG = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "anytext_sd15.yaml" +) + + +def check_limits(tensor): + float16_min = torch.finfo(torch.float16).min + float16_max = torch.finfo(torch.float16).max + + # 检查张量中是否有值小于float16的最小值或大于float16的最大值 + is_below_min = (tensor < float16_min).any() + is_above_max = (tensor > float16_max).any() + + return is_below_min or is_above_max + + +class AnyTextPipeline: + def __init__(self, ckpt_path, font_path, device, use_fp16=True): + self.cfg_path = ANYTEXT_CFG + self.font_path = font_path + self.use_fp16 = use_fp16 + self.device = device + + self.font = ImageFont.truetype(font_path, size=60) + self.model = create_model( + self.cfg_path, + device=self.device, + use_fp16=self.use_fp16, + ) + if self.use_fp16: + self.model = self.model.half() + if Path(ckpt_path).suffix == ".safetensors": + state_dict = load_file(ckpt_path, device="cpu") + else: + state_dict = load_state_dict(ckpt_path, location="cpu") + self.model.load_state_dict(state_dict, strict=False) + self.model = self.model.eval().to(self.device) + self.ddim_sampler = DDIMSampler(self.model, device=self.device) + + def __call__( + self, + prompt: str, + negative_prompt: str, + image: np.ndarray, + masked_image: np.ndarray, + num_inference_steps: int, + strength: float, + guidance_scale: float, + height: int, + width: int, + seed: int, + sort_priority: str = "y", + callback=None, + ): + """ + + Args: + prompt: + negative_prompt: + image: + masked_image: + num_inference_steps: + strength: + guidance_scale: + height: + width: + seed: + sort_priority: x: left-right, y: top-down + + Returns: + result: list of images in numpy.ndarray format + rst_code: 0: normal -1: error 1:warning + rst_info: string of error or warning + + """ + set_seed(seed) + str_warning = "" + + mode = "text-editing" + revise_pos = False + img_count = 1 + ddim_steps = num_inference_steps + w = width + h = height + strength = strength + cfg_scale = guidance_scale + eta = 0.0 + + prompt, texts = self.modify_prompt(prompt) + if prompt is None and texts is None: + return ( + None, + -1, + "You have input Chinese prompt but the translator is not loaded!", + "", + ) + n_lines = len(texts) + if mode in ["text-generation", "gen"]: + edit_image = np.ones((h, w, 3)) * 127.5 # empty mask image + elif mode in ["text-editing", "edit"]: + if masked_image is None or image is None: + return ( + None, + -1, + "Reference image and position image are needed for text editing!", + "", + ) + if isinstance(image, str): + image = cv2.imread(image)[..., ::-1] + assert image is not None, f"Can't read ori_image image from{image}!" 
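+            # tensors are moved to CPU and converted below; plain numpy arrays are accepted as-is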
+ elif isinstance(image, torch.Tensor): + image = image.cpu().numpy() + else: + assert isinstance( + image, np.ndarray + ), f"Unknown format of ori_image: {type(image)}" + edit_image = image.clip(1, 255) # for mask reason + edit_image = check_channels(edit_image) + # edit_image = resize_image( + # edit_image, max_length=768 + # ) # make w h multiple of 64, resize if w or h > max_length + h, w = edit_image.shape[:2] # change h, w by input ref_img + # preprocess pos_imgs(if numpy, make sure it's white pos in black bg) + if masked_image is None: + pos_imgs = np.zeros((w, h, 1)) + if isinstance(masked_image, str): + masked_image = cv2.imread(masked_image)[..., ::-1] + assert ( + masked_image is not None + ), f"Can't read draw_pos image from{masked_image}!" + pos_imgs = 255 - masked_image + elif isinstance(masked_image, torch.Tensor): + pos_imgs = masked_image.cpu().numpy() + else: + assert isinstance( + masked_image, np.ndarray + ), f"Unknown format of draw_pos: {type(masked_image)}" + pos_imgs = 255 - masked_image + pos_imgs = pos_imgs[..., 0:1] + pos_imgs = cv2.convertScaleAbs(pos_imgs) + _, pos_imgs = cv2.threshold(pos_imgs, 254, 255, cv2.THRESH_BINARY) + # seprate pos_imgs + pos_imgs = self.separate_pos_imgs(pos_imgs, sort_priority) + if len(pos_imgs) == 0: + pos_imgs = [np.zeros((h, w, 1))] + if len(pos_imgs) < n_lines: + if n_lines == 1 and texts[0] == " ": + pass # text-to-image without text + else: + raise RuntimeError( + f"{n_lines} text line to draw from prompt, not enough mask area({len(pos_imgs)}) on images" + ) + elif len(pos_imgs) > n_lines: + str_warning = f"Warning: found {len(pos_imgs)} positions that > needed {n_lines} from prompt." + # get pre_pos, poly_list, hint that needed for anytext + pre_pos = [] + poly_list = [] + for input_pos in pos_imgs: + if input_pos.mean() != 0: + input_pos = ( + input_pos[..., np.newaxis] + if len(input_pos.shape) == 2 + else input_pos + ) + poly, pos_img = self.find_polygon(input_pos) + pre_pos += [pos_img / 255.0] + poly_list += [poly] + else: + pre_pos += [np.zeros((h, w, 1))] + poly_list += [None] + np_hint = np.sum(pre_pos, axis=0).clip(0, 1) + # prepare info dict + info = {} + info["glyphs"] = [] + info["gly_line"] = [] + info["positions"] = [] + info["n_lines"] = [len(texts)] * img_count + gly_pos_imgs = [] + for i in range(len(texts)): + text = texts[i] + if len(text) > max_chars: + str_warning = ( + f'"{text}" length > max_chars: {max_chars}, will be cut off...' + ) + text = text[:max_chars] + gly_scale = 2 + if pre_pos[i].mean() != 0: + gly_line = draw_glyph(self.font, text) + glyphs = draw_glyph2( + self.font, + text, + poly_list[i], + scale=gly_scale, + width=w, + height=h, + add_space=False, + ) + gly_pos_img = cv2.drawContours( + glyphs * 255, [poly_list[i] * gly_scale], 0, (255, 255, 255), 1 + ) + if revise_pos: + resize_gly = cv2.resize( + glyphs, (pre_pos[i].shape[1], pre_pos[i].shape[0]) + ) + new_pos = cv2.morphologyEx( + (resize_gly * 255).astype(np.uint8), + cv2.MORPH_CLOSE, + kernel=np.ones( + (resize_gly.shape[0] // 10, resize_gly.shape[1] // 10), + dtype=np.uint8, + ), + iterations=1, + ) + new_pos = ( + new_pos[..., np.newaxis] if len(new_pos.shape) == 2 else new_pos + ) + contours, _ = cv2.findContours( + new_pos, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE + ) + if len(contours) != 1: + str_warning = f"Fail to revise position {i} to bounding rect, remain position unchanged..." 
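+                    # exactly one contour found: tighten the position mask to its
+                    # minimum-area bounding rectangle before drawing the glyph box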
+ else: + rect = cv2.minAreaRect(contours[0]) + poly = np.int0(cv2.boxPoints(rect)) + pre_pos[i] = ( + cv2.drawContours(new_pos, [poly], -1, 255, -1) / 255.0 + ) + gly_pos_img = cv2.drawContours( + glyphs * 255, [poly * gly_scale], 0, (255, 255, 255), 1 + ) + gly_pos_imgs += [gly_pos_img] # for show + else: + glyphs = np.zeros((h * gly_scale, w * gly_scale, 1)) + gly_line = np.zeros((80, 512, 1)) + gly_pos_imgs += [ + np.zeros((h * gly_scale, w * gly_scale, 1)) + ] # for show + pos = pre_pos[i] + info["glyphs"] += [self.arr2tensor(glyphs, img_count)] + info["gly_line"] += [self.arr2tensor(gly_line, img_count)] + info["positions"] += [self.arr2tensor(pos, img_count)] + # get masked_x + masked_img = ((edit_image.astype(np.float32) / 127.5) - 1.0) * (1 - np_hint) + masked_img = np.transpose(masked_img, (2, 0, 1)) + masked_img = torch.from_numpy(masked_img.copy()).float().to(self.device) + if self.use_fp16: + masked_img = masked_img.half() + encoder_posterior = self.model.encode_first_stage(masked_img[None, ...]) + masked_x = self.model.get_first_stage_encoding(encoder_posterior).detach() + if self.use_fp16: + masked_x = masked_x.half() + info["masked_x"] = torch.cat([masked_x for _ in range(img_count)], dim=0) + + hint = self.arr2tensor(np_hint, img_count) + cond = self.model.get_learned_conditioning( + dict( + c_concat=[hint], + c_crossattn=[[prompt] * img_count], + text_info=info, + ) + ) + un_cond = self.model.get_learned_conditioning( + dict( + c_concat=[hint], + c_crossattn=[[negative_prompt] * img_count], + text_info=info, + ) + ) + shape = (4, h // 8, w // 8) + self.model.control_scales = [strength] * 13 + samples, intermediates = self.ddim_sampler.sample( + ddim_steps, + img_count, + shape, + cond, + verbose=False, + eta=eta, + unconditional_guidance_scale=cfg_scale, + unconditional_conditioning=un_cond, + callback=callback + ) + if self.use_fp16: + samples = samples.half() + x_samples = self.model.decode_first_stage(samples) + x_samples = ( + (einops.rearrange(x_samples, "b c h w -> b h w c") * 127.5 + 127.5) + .cpu() + .numpy() + .clip(0, 255) + .astype(np.uint8) + ) + results = [x_samples[i] for i in range(img_count)] + # if ( + # mode == "edit" and False + # ): # replace backgound in text editing but not ideal yet + # results = [r * np_hint + edit_image * (1 - np_hint) for r in results] + # results = [r.clip(0, 255).astype(np.uint8) for r in results] + # if len(gly_pos_imgs) > 0 and show_debug: + # glyph_bs = np.stack(gly_pos_imgs, axis=2) + # glyph_img = np.sum(glyph_bs, axis=2) * 255 + # glyph_img = glyph_img.clip(0, 255).astype(np.uint8) + # results += [np.repeat(glyph_img, 3, axis=2)] + rst_code = 1 if str_warning else 0 + return results, rst_code, str_warning + + def modify_prompt(self, prompt): + prompt = prompt.replace("“", '"') + prompt = prompt.replace("”", '"') + p = '"(.*?)"' + strs = re.findall(p, prompt) + if len(strs) == 0: + strs = [" "] + else: + for s in strs: + prompt = prompt.replace(f'"{s}"', f" {PLACE_HOLDER} ", 1) + # if self.is_chinese(prompt): + # if self.trans_pipe is None: + # return None, None + # old_prompt = prompt + # prompt = self.trans_pipe(input=prompt + " .")["translation"][:-1] + # print(f"Translate: {old_prompt} --> {prompt}") + return prompt, strs + + # def is_chinese(self, text): + # text = checker._clean_text(text) + # for char in text: + # cp = ord(char) + # if checker._is_chinese_char(cp): + # return True + # return False + + def separate_pos_imgs(self, img, sort_priority, gap=102): + num_labels, labels, stats, centroids = 
cv2.connectedComponentsWithStats(img) + components = [] + for label in range(1, num_labels): + component = np.zeros_like(img) + component[labels == label] = 255 + components.append((component, centroids[label])) + if sort_priority == "y": + fir, sec = 1, 0 # top-down first + elif sort_priority == "x": + fir, sec = 0, 1 # left-right first + components.sort(key=lambda c: (c[1][fir] // gap, c[1][sec] // gap)) + sorted_components = [c[0] for c in components] + return sorted_components + + def find_polygon(self, image, min_rect=False): + contours, hierarchy = cv2.findContours( + image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE + ) + max_contour = max(contours, key=cv2.contourArea) # get contour with max area + if min_rect: + # get minimum enclosing rectangle + rect = cv2.minAreaRect(max_contour) + poly = np.int0(cv2.boxPoints(rect)) + else: + # get approximate polygon + epsilon = 0.01 * cv2.arcLength(max_contour, True) + poly = cv2.approxPolyDP(max_contour, epsilon, True) + n, _, xy = poly.shape + poly = poly.reshape(n, xy) + cv2.drawContours(image, [poly], -1, 255, -1) + return poly, image + + def arr2tensor(self, arr, bs): + arr = np.transpose(arr, (2, 0, 1)) + _arr = torch.from_numpy(arr.copy()).float().to(self.device) + if self.use_fp16: + _arr = _arr.half() + _arr = torch.stack([_arr for _ in range(bs)], dim=0) + return _arr diff --git a/iopaint/model/anytext/anytext_sd15.yaml b/iopaint/model/anytext/anytext_sd15.yaml new file mode 100644 index 0000000..d727594 --- /dev/null +++ b/iopaint/model/anytext/anytext_sd15.yaml @@ -0,0 +1,99 @@ +model: + target: iopaint.model.anytext.cldm.cldm.ControlLDM + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "img" + cond_stage_key: "caption" + control_key: "hint" + glyph_key: "glyphs" + position_key: "positions" + image_size: 64 + channels: 4 + cond_stage_trainable: true # need be true when embedding_manager is valid + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + only_mid_control: False + loss_alpha: 0 # perceptual loss, 0.003 + loss_beta: 0 # ctc loss + latin_weight: 1.0 # latin text line may need smaller weigth + with_step_weight: true + use_vae_upsample: true + embedding_manager_config: + target: iopaint.model.anytext.cldm.embedding_manager.EmbeddingManager + params: + valid: true # v6 + emb_type: ocr # ocr, vit, conv + glyph_channels: 1 + position_channels: 1 + add_pos: false + placeholder_string: '*' + + control_stage_config: + target: iopaint.model.anytext.cldm.cldm.ControlNet + params: + image_size: 32 # unused + in_channels: 4 + model_channels: 320 + glyph_channels: 1 + position_channels: 1 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + unet_config: + target: iopaint.model.anytext.cldm.cldm.ControlledUnetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: iopaint.model.anytext.ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + 
out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedderT3 + params: + version: openai/clip-vit-large-patch14 + use_vision: false # v6 diff --git a/lama_cleaner/tests/__init__.py b/iopaint/model/anytext/cldm/__init__.py similarity index 100% rename from lama_cleaner/tests/__init__.py rename to iopaint/model/anytext/cldm/__init__.py diff --git a/iopaint/model/anytext/cldm/cldm.py b/iopaint/model/anytext/cldm/cldm.py new file mode 100644 index 0000000..ad9692a --- /dev/null +++ b/iopaint/model/anytext/cldm/cldm.py @@ -0,0 +1,630 @@ +import os +from pathlib import Path + +import einops +import torch +import torch as th +import torch.nn as nn +import copy +from easydict import EasyDict as edict + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + conv_nd, + linear, + zero_module, + timestep_embedding, +) + +from einops import rearrange, repeat +from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer +from iopaint.model.anytext.ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock +from iopaint.model.anytext.ldm.models.diffusion.ddpm import LatentDiffusion +from iopaint.model.anytext.ldm.util import log_txt_as_img, exists, instantiate_from_config +from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler +from iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution +from .recognizer import TextRecognizer, create_predictor + +CURRENT_DIR = Path(os.path.dirname(os.path.abspath(__file__))) + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +class ControlledUnetModel(UNetModel): + def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): + hs = [] + with torch.no_grad(): + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + if self.use_fp16: + t_emb = t_emb.half() + emb = self.time_embed(t_emb) + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + + if control is not None: + h += control.pop() + + for i, module in enumerate(self.output_blocks): + if only_mid_control or control is None: + h = torch.cat([h, hs.pop()], dim=1) + else: + h = torch.cat([h, hs.pop() + control.pop()], dim=1) + h = module(h, emb, context) + + h = h.type(x.dtype) + return self.out(h) + + +class ControlNet(nn.Module): + def __init__( + self, + image_size, + in_channels, + model_channels, + glyph_channels, + position_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None, + disable_middle_self_attn=False, + use_linear_in_transformer=False, + ): + 
super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' + from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + self.dims = dims + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError("provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult") + self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) + print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set.") + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.use_fp16 = use_fp16 + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) + + self.glyph_block = TimestepEmbedSequential( + conv_nd(dims, glyph_channels, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 16, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 16, 16, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 16, 32, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 32, 32, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 32, 96, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 96, 96, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 96, 256, 3, padding=1, stride=2), + nn.SiLU(), + ) + + self.position_block = TimestepEmbedSequential( + conv_nd(dims, position_channels, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 8, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 8, 16, 3, padding=1, stride=2), 
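+            # the two remaining stride-2 convolutions bring the position map down to
+            # 1/8 resolution (64 channels) so it can be concatenated with the glyph
+            # features and the masked latent in fuse_block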
+ nn.SiLU(), + conv_nd(dims, 16, 16, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 16, 32, 3, padding=1, stride=2), + nn.SiLU(), + conv_nd(dims, 32, 32, 3, padding=1), + nn.SiLU(), + conv_nd(dims, 32, 64, 3, padding=1, stride=2), + nn.SiLU(), + ) + + self.fuse_block = zero_module(conv_nd(dims, 256+64+4, model_channels, 3, padding=1)) + + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self.zero_convs.append(self.make_zero_conv(ch)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + self.zero_convs.append(self.make_zero_conv(ch)) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + # num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self.middle_block_out = self.make_zero_conv(ch) + self._feature_size += ch + + def make_zero_conv(self, channels): + return 
TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0))) + + def forward(self, x, hint, text_info, timesteps, context, **kwargs): + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + if self.use_fp16: + t_emb = t_emb.half() + emb = self.time_embed(t_emb) + + # guided_hint from text_info + B, C, H, W = x.shape + glyphs = torch.cat(text_info['glyphs'], dim=1).sum(dim=1, keepdim=True) + positions = torch.cat(text_info['positions'], dim=1).sum(dim=1, keepdim=True) + enc_glyph = self.glyph_block(glyphs, emb, context) + enc_pos = self.position_block(positions, emb, context) + guided_hint = self.fuse_block(torch.cat([enc_glyph, enc_pos, text_info['masked_x']], dim=1)) + + outs = [] + + h = x.type(self.dtype) + for module, zero_conv in zip(self.input_blocks, self.zero_convs): + if guided_hint is not None: + h = module(h, emb, context) + h += guided_hint + guided_hint = None + else: + h = module(h, emb, context) + outs.append(zero_conv(h, emb, context)) + + h = self.middle_block(h, emb, context) + outs.append(self.middle_block_out(h, emb, context)) + + return outs + + +class ControlLDM(LatentDiffusion): + + def __init__(self, control_stage_config, control_key, glyph_key, position_key, only_mid_control, loss_alpha=0, loss_beta=0, with_step_weight=False, use_vae_upsample=False, latin_weight=1.0, embedding_manager_config=None, *args, **kwargs): + self.use_fp16 = kwargs.pop('use_fp16', False) + super().__init__(*args, **kwargs) + self.control_model = instantiate_from_config(control_stage_config) + self.control_key = control_key + self.glyph_key = glyph_key + self.position_key = position_key + self.only_mid_control = only_mid_control + self.control_scales = [1.0] * 13 + self.loss_alpha = loss_alpha + self.loss_beta = loss_beta + self.with_step_weight = with_step_weight + self.use_vae_upsample = use_vae_upsample + self.latin_weight = latin_weight + + if embedding_manager_config is not None and embedding_manager_config.params.valid: + self.embedding_manager = self.instantiate_embedding_manager(embedding_manager_config, self.cond_stage_model) + for param in self.embedding_manager.embedding_parameters(): + param.requires_grad = True + else: + self.embedding_manager = None + if self.loss_alpha > 0 or self.loss_beta > 0 or self.embedding_manager: + if embedding_manager_config.params.emb_type == 'ocr': + self.text_predictor = create_predictor().eval() + args = edict() + args.rec_image_shape = "3, 48, 320" + args.rec_batch_num = 6 + args.rec_char_dict_path = str(CURRENT_DIR.parent / "ocr_recog" / "ppocr_keys_v1.txt") + args.use_fp16 = self.use_fp16 + self.cn_recognizer = TextRecognizer(args, self.text_predictor) + for param in self.text_predictor.parameters(): + param.requires_grad = False + if self.embedding_manager: + self.embedding_manager.recog = self.cn_recognizer + + @torch.no_grad() + def get_input(self, batch, k, bs=None, *args, **kwargs): + if self.embedding_manager is None: # fill in full caption + self.fill_caption(batch) + x, c, mx = super().get_input(batch, self.first_stage_key, mask_k='masked_img', *args, **kwargs) + control = batch[self.control_key] # for log_images and loss_alpha, not real control + if bs is not None: + control = control[:bs] + control = control.to(self.device) + control = einops.rearrange(control, 'b h w c -> b c h w') + control = control.to(memory_format=torch.contiguous_format).float() + + inv_mask = batch['inv_mask'] + if bs is not None: + inv_mask = inv_mask[:bs] + inv_mask = inv_mask.to(self.device) + inv_mask 
= einops.rearrange(inv_mask, 'b h w c -> b c h w') + inv_mask = inv_mask.to(memory_format=torch.contiguous_format).float() + + glyphs = batch[self.glyph_key] + gly_line = batch['gly_line'] + positions = batch[self.position_key] + n_lines = batch['n_lines'] + language = batch['language'] + texts = batch['texts'] + assert len(glyphs) == len(positions) + for i in range(len(glyphs)): + if bs is not None: + glyphs[i] = glyphs[i][:bs] + gly_line[i] = gly_line[i][:bs] + positions[i] = positions[i][:bs] + n_lines = n_lines[:bs] + glyphs[i] = glyphs[i].to(self.device) + gly_line[i] = gly_line[i].to(self.device) + positions[i] = positions[i].to(self.device) + glyphs[i] = einops.rearrange(glyphs[i], 'b h w c -> b c h w') + gly_line[i] = einops.rearrange(gly_line[i], 'b h w c -> b c h w') + positions[i] = einops.rearrange(positions[i], 'b h w c -> b c h w') + glyphs[i] = glyphs[i].to(memory_format=torch.contiguous_format).float() + gly_line[i] = gly_line[i].to(memory_format=torch.contiguous_format).float() + positions[i] = positions[i].to(memory_format=torch.contiguous_format).float() + info = {} + info['glyphs'] = glyphs + info['positions'] = positions + info['n_lines'] = n_lines + info['language'] = language + info['texts'] = texts + info['img'] = batch['img'] # nhwc, (-1,1) + info['masked_x'] = mx + info['gly_line'] = gly_line + info['inv_mask'] = inv_mask + return x, dict(c_crossattn=[c], c_concat=[control], text_info=info) + + def apply_model(self, x_noisy, t, cond, *args, **kwargs): + assert isinstance(cond, dict) + diffusion_model = self.model.diffusion_model + _cond = torch.cat(cond['c_crossattn'], 1) + _hint = torch.cat(cond['c_concat'], 1) + if self.use_fp16: + x_noisy = x_noisy.half() + control = self.control_model(x=x_noisy, timesteps=t, context=_cond, hint=_hint, text_info=cond['text_info']) + control = [c * scale for c, scale in zip(control, self.control_scales)] + eps = diffusion_model(x=x_noisy, timesteps=t, context=_cond, control=control, only_mid_control=self.only_mid_control) + + return eps + + def instantiate_embedding_manager(self, config, embedder): + model = instantiate_from_config(config, embedder=embedder) + return model + + @torch.no_grad() + def get_unconditional_conditioning(self, N): + return self.get_learned_conditioning(dict(c_crossattn=[[""] * N], text_info=None)) + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + if self.embedding_manager is not None and c['text_info'] is not None: + self.embedding_manager.encode_text(c['text_info']) + if isinstance(c, dict): + cond_txt = c['c_crossattn'][0] + else: + cond_txt = c + if self.embedding_manager is not None: + cond_txt = self.cond_stage_model.encode(cond_txt, embedding_manager=self.embedding_manager) + else: + cond_txt = self.cond_stage_model.encode(cond_txt) + if isinstance(c, dict): + c['c_crossattn'][0] = cond_txt + else: + c = cond_txt + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def fill_caption(self, batch, place_holder='*'): + bs = len(batch['n_lines']) + cond_list = copy.deepcopy(batch[self.cond_stage_key]) + for i in range(bs): + n_lines = batch['n_lines'][i] + if n_lines == 0: + continue + cur_cap = cond_list[i] + for j in range(n_lines): + r_txt = batch['texts'][j][i] + cur_cap = 
cur_cap.replace(place_holder, f'"{r_txt}"', 1) + cond_list[i] = cur_cap + batch[self.cond_stage_key] = cond_list + + @torch.no_grad() + def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs): + use_ddim = ddim_steps is not None + + log = dict() + z, c = self.get_input(batch, self.first_stage_key, bs=N) + if self.cond_stage_trainable: + with torch.no_grad(): + c = self.get_learned_conditioning(c) + c_crossattn = c["c_crossattn"][0][:N] + c_cat = c["c_concat"][0][:N] + text_info = c["text_info"] + text_info['glyphs'] = [i[:N] for i in text_info['glyphs']] + text_info['gly_line'] = [i[:N] for i in text_info['gly_line']] + text_info['positions'] = [i[:N] for i in text_info['positions']] + text_info['n_lines'] = text_info['n_lines'][:N] + text_info['masked_x'] = text_info['masked_x'][:N] + text_info['img'] = text_info['img'][:N] + + N = min(z.shape[0], N) + n_row = min(z.shape[0], n_row) + log["reconstruction"] = self.decode_first_stage(z) + log["masked_image"] = self.decode_first_stage(text_info['masked_x']) + log["control"] = c_cat * 2.0 - 1.0 + log["img"] = text_info['img'].permute(0, 3, 1, 2) # log source image if needed + # get glyph + glyph_bs = torch.stack(text_info['glyphs']) + glyph_bs = torch.sum(glyph_bs, dim=0) * 2.0 - 1.0 + log["glyph"] = torch.nn.functional.interpolate(glyph_bs, size=(512, 512), mode='bilinear', align_corners=True,) + # fill caption + if not self.embedding_manager: + self.fill_caption(batch) + captions = batch[self.cond_stage_key] + log["conditioning"] = log_txt_as_img((512, 512), captions, size=16) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c], "text_info": text_info}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_cross = self.get_unconditional_conditioning(N) + uc_cat = c_cat # torch.zeros_like(c_cat) + uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross['c_crossattn'][0]], "text_info": text_info} + samples_cfg, tmps = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c_crossattn], "text_info": text_info}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc_full, + ) + 
x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + pred_x0 = False # wether log pred_x0 + if pred_x0: + for idx in range(len(tmps['pred_x0'])): + pred_x0 = self.decode_first_stage(tmps['pred_x0'][idx]) + log[f"pred_x0_{tmps['index'][idx]}"] = pred_x0 + + return log + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + ddim_sampler = DDIMSampler(self) + b, c, h, w = cond["c_concat"][0].shape + shape = (self.channels, h // 8, w // 8) + samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, log_every_t=5, **kwargs) + return samples, intermediates + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.control_model.parameters()) + if self.embedding_manager: + params += list(self.embedding_manager.embedding_parameters()) + if not self.sd_locked: + # params += list(self.model.diffusion_model.input_blocks.parameters()) + # params += list(self.model.diffusion_model.middle_block.parameters()) + params += list(self.model.diffusion_model.output_blocks.parameters()) + params += list(self.model.diffusion_model.out.parameters()) + if self.unlockKV: + nCount = 0 + for name, param in self.model.diffusion_model.named_parameters(): + if 'attn2.to_k' in name or 'attn2.to_v' in name: + params += [param] + nCount += 1 + print(f'Cross attention is unlocked, and {nCount} Wk or Wv are added to potimizers!!!') + + opt = torch.optim.AdamW(params, lr=lr) + return opt + + def low_vram_shift(self, is_diffusing): + if is_diffusing: + self.model = self.model.cuda() + self.control_model = self.control_model.cuda() + self.first_stage_model = self.first_stage_model.cpu() + self.cond_stage_model = self.cond_stage_model.cpu() + else: + self.model = self.model.cpu() + self.control_model = self.control_model.cpu() + self.first_stage_model = self.first_stage_model.cuda() + self.cond_stage_model = self.cond_stage_model.cuda() diff --git a/iopaint/model/anytext/cldm/ddim_hacked.py b/iopaint/model/anytext/cldm/ddim_hacked.py new file mode 100644 index 0000000..b23a883 --- /dev/null +++ b/iopaint/model/anytext/cldm/ddim_hacked.py @@ -0,0 +1,486 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + make_ddim_sampling_parameters, + make_ddim_timesteps, + noise_like, + extract_into_tensor, +) + + +class DDIMSampler(object): + def __init__(self, model, device, schedule="linear", **kwargs): + super().__init__() + self.device = device + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device(self.device): + attr = attr.to(torch.device(self.device)) + setattr(self, name, attr) + + def make_schedule( + self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True + ): + self.ddim_timesteps = make_ddim_timesteps( + ddim_discr_method=ddim_discretize, + num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps, + verbose=verbose, + ) + alphas_cumprod = self.model.alphas_cumprod + assert ( + alphas_cumprod.shape[0] == self.ddpm_num_timesteps + ), "alphas have to be defined for each timestep" + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device) + + self.register_buffer("betas", to_torch(self.model.betas)) + self.register_buffer("alphas_cumprod", 
to_torch(alphas_cumprod)) + self.register_buffer( + "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) + ) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer( + "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", + to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", + to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), + ) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( + alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta, + verbose=verbose, + ) + self.register_buffer("ddim_sigmas", ddim_sigmas) + self.register_buffer("ddim_alphas", ddim_alphas) + self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) + self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) + / (1 - self.alphas_cumprod) + * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) + ) + self.register_buffer( + "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps + ) + + @torch.no_grad() + def sample( + self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0.0, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ dynamic_threshold=None, + ucg_schedule=None, + **kwargs, + ): + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): + ctmp = ctmp[0] + cbs = ctmp.shape[0] + if cbs != batch_size: + print( + f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" + ) + + elif isinstance(conditioning, list): + for ctmp in conditioning: + if ctmp.shape[0] != batch_size: + print( + f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" + ) + + else: + if conditioning.shape[0] != batch_size: + print( + f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}" + ) + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f"Data shape for DDIM sampling is {size}, eta {eta}") + + samples, intermediates = self.ddim_sampling( + conditioning, + size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, + x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ucg_schedule=ucg_schedule, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling( + self, + cond, + shape, + x_T=None, + ddim_use_original_steps=False, + callback=None, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + log_every_t=100, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + dynamic_threshold=None, + ucg_schedule=None, + ): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = ( + self.ddpm_num_timesteps + if ddim_use_original_steps + else self.ddim_timesteps + ) + elif timesteps is not None and not ddim_use_original_steps: + subset_end = ( + int( + min(timesteps / self.ddim_timesteps.shape[0], 1) + * self.ddim_timesteps.shape[0] + ) + - 1 + ) + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {"x_inter": [img], "pred_x0": [img]} + time_range = ( + reversed(range(0, timesteps)) + if ddim_use_original_steps + else np.flip(timesteps) + ) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample( + x0, ts + ) # TODO: deterministic forward pass? 
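+                # pin the known region (mask == 1) to the re-noised original x0;
+                # the sampler only updates the remaining masked-out area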
+ img = img_orig * mask + (1.0 - mask) * img + + if ucg_schedule is not None: + assert len(ucg_schedule) == len(time_range) + unconditional_guidance_scale = ucg_schedule[i] + + outs = self.p_sample_ddim( + img, + cond, + ts, + index=index, + use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, + temperature=temperature, + noise_dropout=noise_dropout, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ) + img, pred_x0 = outs + if callback: + callback(None, i, None, None) + if img_callback: + img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates["x_inter"].append(img) + intermediates["pred_x0"].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim( + self, + x, + c, + t, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + dynamic_threshold=None, + ): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.0: + model_output = self.model.apply_model(x, t, c) + else: + model_t = self.model.apply_model(x, t, c) + model_uncond = self.model.apply_model(x, t, unconditional_conditioning) + model_output = model_uncond + unconditional_guidance_scale * ( + model_t - model_uncond + ) + + if self.model.parameterization == "v": + e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) + else: + e_t = model_output + + if score_corrector is not None: + assert self.model.parameterization == "eps", "not implemented" + e_t = score_corrector.modify_score( + self.model, e_t, x, t, c, **corrector_kwargs + ) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = ( + self.model.alphas_cumprod_prev + if use_original_steps + else self.ddim_alphas_prev + ) + sqrt_one_minus_alphas = ( + self.model.sqrt_one_minus_alphas_cumprod + if use_original_steps + else self.ddim_sqrt_one_minus_alphas + ) + sigmas = ( + self.model.ddim_sigmas_for_original_num_steps + if use_original_steps + else self.ddim_sigmas + ) + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full( + (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device + ) + + # current prediction for x_0 + if self.model.parameterization != "v": + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + else: + pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) + + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + + if dynamic_threshold is not None: + raise NotImplementedError() + + # direction pointing to x_t + dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def encode( + self, + x0, + c, + t_enc, + 
use_original_steps=False, + return_intermediates=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + callback=None, + ): + timesteps = ( + np.arange(self.ddpm_num_timesteps) + if use_original_steps + else self.ddim_timesteps + ) + num_reference_steps = timesteps.shape[0] + + assert t_enc <= num_reference_steps + num_steps = t_enc + + if use_original_steps: + alphas_next = self.alphas_cumprod[:num_steps] + alphas = self.alphas_cumprod_prev[:num_steps] + else: + alphas_next = self.ddim_alphas[:num_steps] + alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) + + x_next = x0 + intermediates = [] + inter_steps = [] + for i in tqdm(range(num_steps), desc="Encoding Image"): + t = torch.full( + (x0.shape[0],), timesteps[i], device=self.model.device, dtype=torch.long + ) + if unconditional_guidance_scale == 1.0: + noise_pred = self.model.apply_model(x_next, t, c) + else: + assert unconditional_conditioning is not None + e_t_uncond, noise_pred = torch.chunk( + self.model.apply_model( + torch.cat((x_next, x_next)), + torch.cat((t, t)), + torch.cat((unconditional_conditioning, c)), + ), + 2, + ) + noise_pred = e_t_uncond + unconditional_guidance_scale * ( + noise_pred - e_t_uncond + ) + + xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next + weighted_noise_pred = ( + alphas_next[i].sqrt() + * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) + * noise_pred + ) + x_next = xt_weighted + weighted_noise_pred + if ( + return_intermediates + and i % (num_steps // return_intermediates) == 0 + and i < num_steps - 1 + ): + intermediates.append(x_next) + inter_steps.append(i) + elif return_intermediates and i >= num_steps - 2: + intermediates.append(x_next) + inter_steps.append(i) + if callback: + callback(i) + + out = {"x_encoded": x_next, "intermediate_steps": inter_steps} + if return_intermediates: + out.update({"intermediates": intermediates}) + return x_next, out + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return ( + extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise + ) + + @torch.no_grad() + def decode( + self, + x_latent, + cond, + t_start, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + callback=None, + ): + timesteps = ( + np.arange(self.ddpm_num_timesteps) + if use_original_steps + else self.ddim_timesteps + ) + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc="Decoding image", total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full( + (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long + ) + x_dec, _ = self.p_sample_ddim( + x_dec, + cond, + ts, + index=index, + use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + 
unconditional_conditioning=unconditional_conditioning, + ) + if callback: + callback(i) + return x_dec diff --git a/iopaint/model/anytext/cldm/embedding_manager.py b/iopaint/model/anytext/cldm/embedding_manager.py new file mode 100644 index 0000000..6ccf8a9 --- /dev/null +++ b/iopaint/model/anytext/cldm/embedding_manager.py @@ -0,0 +1,165 @@ +''' +Copyright (c) Alibaba, Inc. and its affiliates. +''' +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import conv_nd, linear + + +def get_clip_token_for_string(tokenizer, string): + batch_encoding = tokenizer(string, truncation=True, max_length=77, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"] + assert torch.count_nonzero(tokens - 49407) == 2, f"String '{string}' maps to more than a single token. Please use another string" + return tokens[0, 1] + + +def get_bert_token_for_string(tokenizer, string): + token = tokenizer(string) + assert torch.count_nonzero(token) == 3, f"String '{string}' maps to more than a single token. Please use another string" + token = token[0, 1] + return token + + +def get_clip_vision_emb(encoder, processor, img): + _img = img.repeat(1, 3, 1, 1)*255 + inputs = processor(images=_img, return_tensors="pt") + inputs['pixel_values'] = inputs['pixel_values'].to(img.device) + outputs = encoder(**inputs) + emb = outputs.image_embeds + return emb + + +def get_recog_emb(encoder, img_list): + _img_list = [(img.repeat(1, 3, 1, 1)*255)[0] for img in img_list] + encoder.predictor.eval() + _, preds_neck = encoder.pred_imglist(_img_list, show_debug=False) + return preds_neck + + +def pad_H(x): + _, _, H, W = x.shape + p_top = (W - H) // 2 + p_bot = W - H - p_top + return F.pad(x, (0, 0, p_top, p_bot)) + + +class EncodeNet(nn.Module): + def __init__(self, in_channels, out_channels): + super(EncodeNet, self).__init__() + chan = 16 + n_layer = 4 # downsample + + self.conv1 = conv_nd(2, in_channels, chan, 3, padding=1) + self.conv_list = nn.ModuleList([]) + _c = chan + for i in range(n_layer): + self.conv_list.append(conv_nd(2, _c, _c*2, 3, padding=1, stride=2)) + _c *= 2 + self.conv2 = conv_nd(2, _c, out_channels, 3, padding=1) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.act = nn.SiLU() + + def forward(self, x): + x = self.act(self.conv1(x)) + for layer in self.conv_list: + x = self.act(layer(x)) + x = self.act(self.conv2(x)) + x = self.avgpool(x) + x = x.view(x.size(0), -1) + return x + + +class EmbeddingManager(nn.Module): + def __init__( + self, + embedder, + valid=True, + glyph_channels=20, + position_channels=1, + placeholder_string='*', + add_pos=False, + emb_type='ocr', + **kwargs + ): + super().__init__() + if hasattr(embedder, 'tokenizer'): # using Stable Diffusion's CLIP encoder + get_token_for_string = partial(get_clip_token_for_string, embedder.tokenizer) + token_dim = 768 + if hasattr(embedder, 'vit'): + assert emb_type == 'vit' + self.get_vision_emb = partial(get_clip_vision_emb, embedder.vit, embedder.processor) + self.get_recog_emb = None + else: # using LDM's BERT encoder + get_token_for_string = partial(get_bert_token_for_string, embedder.tknz_fn) + token_dim = 1280 + self.token_dim = token_dim + self.emb_type = emb_type + + self.add_pos = add_pos + if add_pos: + self.position_encoder = EncodeNet(position_channels, token_dim) + if emb_type == 'ocr': + self.proj = linear(40*64, token_dim) + if emb_type == 'conv': + 
self.glyph_encoder = EncodeNet(glyph_channels, token_dim) + + self.placeholder_token = get_token_for_string(placeholder_string) + + def encode_text(self, text_info): + if self.get_recog_emb is None and self.emb_type == 'ocr': + self.get_recog_emb = partial(get_recog_emb, self.recog) + + gline_list = [] + pos_list = [] + for i in range(len(text_info['n_lines'])): # sample index in a batch + n_lines = text_info['n_lines'][i] + for j in range(n_lines): # line + gline_list += [text_info['gly_line'][j][i:i+1]] + if self.add_pos: + pos_list += [text_info['positions'][j][i:i+1]] + + if len(gline_list) > 0: + if self.emb_type == 'ocr': + recog_emb = self.get_recog_emb(gline_list) + enc_glyph = self.proj(recog_emb.reshape(recog_emb.shape[0], -1)) + elif self.emb_type == 'vit': + enc_glyph = self.get_vision_emb(pad_H(torch.cat(gline_list, dim=0))) + elif self.emb_type == 'conv': + enc_glyph = self.glyph_encoder(pad_H(torch.cat(gline_list, dim=0))) + if self.add_pos: + enc_pos = self.position_encoder(torch.cat(gline_list, dim=0)) + enc_glyph = enc_glyph+enc_pos + + self.text_embs_all = [] + n_idx = 0 + for i in range(len(text_info['n_lines'])): # sample index in a batch + n_lines = text_info['n_lines'][i] + text_embs = [] + for j in range(n_lines): # line + text_embs += [enc_glyph[n_idx:n_idx+1]] + n_idx += 1 + self.text_embs_all += [text_embs] + + def forward( + self, + tokenized_text, + embedded_text, + ): + b, device = tokenized_text.shape[0], tokenized_text.device + for i in range(b): + idx = tokenized_text[i] == self.placeholder_token.to(device) + if sum(idx) > 0: + if i >= len(self.text_embs_all): + print('truncation for log images...') + break + text_emb = torch.cat(self.text_embs_all[i], dim=0) + if sum(idx) != len(text_emb): + print('truncation for long caption...') + embedded_text[i][idx] = text_emb[:sum(idx)] + return embedded_text + + def embedding_parameters(self): + return self.parameters() diff --git a/iopaint/model/anytext/cldm/hack.py b/iopaint/model/anytext/cldm/hack.py new file mode 100644 index 0000000..05afe5f --- /dev/null +++ b/iopaint/model/anytext/cldm/hack.py @@ -0,0 +1,111 @@ +import torch +import einops + +import iopaint.model.anytext.ldm.modules.encoders.modules +import iopaint.model.anytext.ldm.modules.attention + +from transformers import logging +from iopaint.model.anytext.ldm.modules.attention import default + + +def disable_verbosity(): + logging.set_verbosity_error() + print('logging improved.') + return + + +def enable_sliced_attention(): + iopaint.model.anytext.ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward + print('Enabled sliced_attention.') + return + + +def hack_everything(clip_skip=0): + disable_verbosity() + iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward + iopaint.model.anytext.ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip + print('Enabled clip hacks.') + return + + +# Written by Lvmin +def _hacked_clip_forward(self, text): + PAD = self.tokenizer.pad_token_id + EOS = self.tokenizer.eos_token_id + BOS = self.tokenizer.bos_token_id + + def tokenize(t): + return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"] + + def transformer_encode(t): + if self.clip_skip > 1: + rt = self.transformer(input_ids=t, output_hidden_states=True) + return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip]) + else: + return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state + + def split(x): + 
return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3] + + def pad(x, p, i): + return x[:i] if len(x) >= i else x + [p] * (i - len(x)) + + raw_tokens_list = tokenize(text) + tokens_list = [] + + for raw_tokens in raw_tokens_list: + raw_tokens_123 = split(raw_tokens) + raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123] + raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123] + tokens_list.append(raw_tokens_123) + + tokens_list = torch.IntTensor(tokens_list).to(self.device) + + feed = einops.rearrange(tokens_list, 'b f i -> (b f) i') + y = transformer_encode(feed) + z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3) + + return z + + +# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py +def _hacked_sliced_attentin_forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + del context, x + + q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + limit = k.shape[0] + att_step = 1 + q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0)) + k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0)) + v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0)) + + q_chunks.reverse() + k_chunks.reverse() + v_chunks.reverse() + sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) + del k, q, v + for i in range(0, limit, att_step): + q_buffer = q_chunks.pop() + k_buffer = k_chunks.pop() + v_buffer = v_chunks.pop() + sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale + + del k_buffer, q_buffer + # attention, what we cannot get enough of, by chunks + + sim_buffer = sim_buffer.softmax(dim=-1) + + sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) + del v_buffer + sim[i:i + att_step, :, :] = sim_buffer + + del sim_buffer + sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h) + return self.to_out(sim) diff --git a/iopaint/model/anytext/cldm/model.py b/iopaint/model/anytext/cldm/model.py new file mode 100644 index 0000000..688f2ed --- /dev/null +++ b/iopaint/model/anytext/cldm/model.py @@ -0,0 +1,40 @@ +import os +import torch + +from omegaconf import OmegaConf +from iopaint.model.anytext.ldm.util import instantiate_from_config + + +def get_state_dict(d): + return d.get("state_dict", d) + + +def load_state_dict(ckpt_path, location="cpu"): + _, extension = os.path.splitext(ckpt_path) + if extension.lower() == ".safetensors": + import safetensors.torch + + state_dict = safetensors.torch.load_file(ckpt_path, device=location) + else: + state_dict = get_state_dict( + torch.load(ckpt_path, map_location=torch.device(location)) + ) + state_dict = get_state_dict(state_dict) + print(f"Loaded state_dict from [{ckpt_path}]") + return state_dict + + +def create_model(config_path, device, cond_stage_path=None, use_fp16=False): + config = OmegaConf.load(config_path) + # if cond_stage_path: + # config.model.params.cond_stage_config.params.version = ( + # cond_stage_path # use pre-downloaded ckpts, in case blocked + # ) + config.model.params.cond_stage_config.params.device = str(device) + if use_fp16: + config.model.params.use_fp16 = True + config.model.params.control_stage_config.params.use_fp16 = True + config.model.params.unet_config.params.use_fp16 = True + model = instantiate_from_config(config.model).cpu() + print(f"Loaded model config from [{config_path}]") + 
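+ # NOTE: instantiate_from_config builds the model on CPU here; moving it to `device` is left to the caller.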
return model diff --git a/iopaint/model/anytext/cldm/recognizer.py b/iopaint/model/anytext/cldm/recognizer.py new file mode 100755 index 0000000..0621512 --- /dev/null +++ b/iopaint/model/anytext/cldm/recognizer.py @@ -0,0 +1,300 @@ +""" +Copyright (c) Alibaba, Inc. and its affiliates. +""" +import os +import cv2 +import numpy as np +import math +import traceback +from easydict import EasyDict as edict +import time +from iopaint.model.anytext.ocr_recog.RecModel import RecModel +import torch +import torch.nn.functional as F + + +def min_bounding_rect(img): + ret, thresh = cv2.threshold(img, 127, 255, 0) + contours, hierarchy = cv2.findContours( + thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) + if len(contours) == 0: + print("Bad contours, using fake bbox...") + return np.array([[0, 0], [100, 0], [100, 100], [0, 100]]) + max_contour = max(contours, key=cv2.contourArea) + rect = cv2.minAreaRect(max_contour) + box = cv2.boxPoints(rect) + box = np.int0(box) + # sort + x_sorted = sorted(box, key=lambda x: x[0]) + left = x_sorted[:2] + right = x_sorted[2:] + left = sorted(left, key=lambda x: x[1]) + (tl, bl) = left + right = sorted(right, key=lambda x: x[1]) + (tr, br) = right + if tl[1] > bl[1]: + (tl, bl) = (bl, tl) + if tr[1] > br[1]: + (tr, br) = (br, tr) + return np.array([tl, tr, br, bl]) + + +def create_predictor(model_dir=None, model_lang="ch", is_onnx=False): + model_file_path = model_dir + if model_file_path is not None and not os.path.exists(model_file_path): + raise ValueError("not find model file path {}".format(model_file_path)) + + if is_onnx: + import onnxruntime as ort + + sess = ort.InferenceSession( + model_file_path, providers=["CPUExecutionProvider"] + ) # 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider' + return sess + else: + if model_lang == "ch": + n_class = 6625 + elif model_lang == "en": + n_class = 97 + else: + raise ValueError(f"Unsupported OCR recog model_lang: {model_lang}") + rec_config = edict( + in_channels=3, + backbone=edict( + type="MobileNetV1Enhance", + scale=0.5, + last_conv_stride=[1, 2], + last_pool_type="avg", + ), + neck=edict( + type="SequenceEncoder", + encoder_type="svtr", + dims=64, + depth=2, + hidden_dims=120, + use_guide=True, + ), + head=edict( + type="CTCHead", + fc_decay=0.00001, + out_channels=n_class, + return_feats=True, + ), + ) + + rec_model = RecModel(rec_config) + if model_file_path is not None: + rec_model.load_state_dict(torch.load(model_file_path, map_location="cpu")) + rec_model.eval() + return rec_model.eval() + + +def _check_image_file(path): + img_end = {"jpg", "bmp", "png", "jpeg", "rgb", "tif", "tiff"} + return any([path.lower().endswith(e) for e in img_end]) + + +def get_image_file_list(img_file): + imgs_lists = [] + if img_file is None or not os.path.exists(img_file): + raise Exception("not found any img file in {}".format(img_file)) + if os.path.isfile(img_file) and _check_image_file(img_file): + imgs_lists.append(img_file) + elif os.path.isdir(img_file): + for single_file in os.listdir(img_file): + file_path = os.path.join(img_file, single_file) + if os.path.isfile(file_path) and _check_image_file(file_path): + imgs_lists.append(file_path) + if len(imgs_lists) == 0: + raise Exception("not found any img file in {}".format(img_file)) + imgs_lists = sorted(imgs_lists) + return imgs_lists + + +class TextRecognizer(object): + def __init__(self, args, predictor): + self.rec_image_shape = [int(v) for v in args.rec_image_shape.split(",")] + self.rec_batch_num = args.rec_batch_num + 
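+ # `predictor` is either a torch.nn.Module or an onnxruntime InferenceSession (see `is_onnx` below).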
self.predictor = predictor + self.chars = self.get_char_dict(args.rec_char_dict_path) + self.char2id = {x: i for i, x in enumerate(self.chars)} + self.is_onnx = not isinstance(self.predictor, torch.nn.Module) + self.use_fp16 = args.use_fp16 + + # img: CHW + def resize_norm_img(self, img, max_wh_ratio): + imgC, imgH, imgW = self.rec_image_shape + assert imgC == img.shape[0] + imgW = int((imgH * max_wh_ratio)) + + h, w = img.shape[1:] + ratio = w / float(h) + if math.ceil(imgH * ratio) > imgW: + resized_w = imgW + else: + resized_w = int(math.ceil(imgH * ratio)) + resized_image = torch.nn.functional.interpolate( + img.unsqueeze(0), + size=(imgH, resized_w), + mode="bilinear", + align_corners=True, + ) + resized_image /= 255.0 + resized_image -= 0.5 + resized_image /= 0.5 + padding_im = torch.zeros((imgC, imgH, imgW), dtype=torch.float32).to(img.device) + padding_im[:, :, 0:resized_w] = resized_image[0] + return padding_im + + # img_list: list of tensors with shape chw 0-255 + def pred_imglist(self, img_list, show_debug=False, is_ori=False): + img_num = len(img_list) + assert img_num > 0 + # Calculate the aspect ratio of all text bars + width_list = [] + for img in img_list: + width_list.append(img.shape[2] / float(img.shape[1])) + # Sorting can speed up the recognition process + indices = torch.from_numpy(np.argsort(np.array(width_list))) + batch_num = self.rec_batch_num + preds_all = [None] * img_num + preds_neck_all = [None] * img_num + for beg_img_no in range(0, img_num, batch_num): + end_img_no = min(img_num, beg_img_no + batch_num) + norm_img_batch = [] + + imgC, imgH, imgW = self.rec_image_shape[:3] + max_wh_ratio = imgW / imgH + for ino in range(beg_img_no, end_img_no): + h, w = img_list[indices[ino]].shape[1:] + if h > w * 1.2: + img = img_list[indices[ino]] + img = torch.transpose(img, 1, 2).flip(dims=[1]) + img_list[indices[ino]] = img + h, w = img.shape[1:] + # wh_ratio = w * 1.0 / h + # max_wh_ratio = max(max_wh_ratio, wh_ratio) # comment to not use different ratio + for ino in range(beg_img_no, end_img_no): + norm_img = self.resize_norm_img(img_list[indices[ino]], max_wh_ratio) + if self.use_fp16: + norm_img = norm_img.half() + norm_img = norm_img.unsqueeze(0) + norm_img_batch.append(norm_img) + norm_img_batch = torch.cat(norm_img_batch, dim=0) + if show_debug: + for i in range(len(norm_img_batch)): + _img = norm_img_batch[i].permute(1, 2, 0).detach().cpu().numpy() + _img = (_img + 0.5) * 255 + _img = _img[:, :, ::-1] + file_name = f"{indices[beg_img_no + i]}" + file_name = file_name + "_ori" if is_ori else file_name + cv2.imwrite(file_name + ".jpg", _img) + if self.is_onnx: + input_dict = {} + input_dict[self.predictor.get_inputs()[0].name] = ( + norm_img_batch.detach().cpu().numpy() + ) + outputs = self.predictor.run(None, input_dict) + preds = {} + preds["ctc"] = torch.from_numpy(outputs[0]) + preds["ctc_neck"] = [torch.zeros(1)] * img_num + else: + preds = self.predictor(norm_img_batch) + for rno in range(preds["ctc"].shape[0]): + preds_all[indices[beg_img_no + rno]] = preds["ctc"][rno] + preds_neck_all[indices[beg_img_no + rno]] = preds["ctc_neck"][rno] + + return torch.stack(preds_all, dim=0), torch.stack(preds_neck_all, dim=0) + + def get_char_dict(self, character_dict_path): + character_str = [] + with open(character_dict_path, "rb") as fin: + lines = fin.readlines() + for line in lines: + line = line.decode("utf-8").strip("\n").strip("\r\n") + character_str.append(line) + dict_character = list(character_str) + dict_character = ["sos"] + dict_character + [" "] # eos is 
space + return dict_character + + def get_text(self, order): + char_list = [self.chars[text_id] for text_id in order] + return "".join(char_list) + + def decode(self, mat): + text_index = mat.detach().cpu().numpy().argmax(axis=1) + ignored_tokens = [0] + selection = np.ones(len(text_index), dtype=bool) + selection[1:] = text_index[1:] != text_index[:-1] + for ignored_token in ignored_tokens: + selection &= text_index != ignored_token + return text_index[selection], np.where(selection)[0] + + def get_ctcloss(self, preds, gt_text, weight): + if not isinstance(weight, torch.Tensor): + weight = torch.tensor(weight).to(preds.device) + ctc_loss = torch.nn.CTCLoss(reduction="none") + log_probs = preds.log_softmax(dim=2).permute(1, 0, 2) # NTC-->TNC + targets = [] + target_lengths = [] + for t in gt_text: + targets += [self.char2id.get(i, len(self.chars) - 1) for i in t] + target_lengths += [len(t)] + targets = torch.tensor(targets).to(preds.device) + target_lengths = torch.tensor(target_lengths).to(preds.device) + input_lengths = torch.tensor([log_probs.shape[0]] * (log_probs.shape[1])).to( + preds.device + ) + loss = ctc_loss(log_probs, targets, input_lengths, target_lengths) + loss = loss / input_lengths * weight + return loss + + +def main(): + rec_model_dir = "./ocr_weights/ppv3_rec.pth" + predictor = create_predictor(rec_model_dir) + args = edict() + args.rec_image_shape = "3, 48, 320" + args.rec_char_dict_path = "./ocr_weights/ppocr_keys_v1.txt" + args.rec_batch_num = 6 + text_recognizer = TextRecognizer(args, predictor) + image_dir = "./test_imgs_cn" + gt_text = ["韩国小馆"] * 14 + + image_file_list = get_image_file_list(image_dir) + valid_image_file_list = [] + img_list = [] + + for image_file in image_file_list: + img = cv2.imread(image_file) + if img is None: + print("error in loading image:{}".format(image_file)) + continue + valid_image_file_list.append(image_file) + img_list.append(torch.from_numpy(img).permute(2, 0, 1).float()) + try: + tic = time.time() + times = [] + for i in range(10): + preds, _ = text_recognizer.pred_imglist(img_list) # get text + preds_all = preds.softmax(dim=2) + times += [(time.time() - tic) * 1000.0] + tic = time.time() + print(times) + print(np.mean(times[1:]) / len(preds_all)) + weight = np.ones(len(gt_text)) + loss = text_recognizer.get_ctcloss(preds, gt_text, weight) + for i in range(len(valid_image_file_list)): + pred = preds_all[i] + order, idx = text_recognizer.decode(pred) + text = text_recognizer.get_text(order) + print( + f'{valid_image_file_list[i]}: pred/gt="{text}"/"{gt_text[i]}", loss={loss[i]:.2f}' + ) + except Exception as E: + print(traceback.format_exc(), E) + + +if __name__ == "__main__": + main() diff --git a/lama_cleaner/tests/overture-creations-5sI6fQgYIuo_all_mask.png b/iopaint/model/anytext/ldm/__init__.py similarity index 100% rename from lama_cleaner/tests/overture-creations-5sI6fQgYIuo_all_mask.png rename to iopaint/model/anytext/ldm/__init__.py diff --git a/iopaint/model/anytext/ldm/models/__init__.py b/iopaint/model/anytext/ldm/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ldm/models/autoencoder.py b/iopaint/model/anytext/ldm/models/autoencoder.py new file mode 100644 index 0000000..20d52e9 --- /dev/null +++ b/iopaint/model/anytext/ldm/models/autoencoder.py @@ -0,0 +1,218 @@ +import torch +import torch.nn.functional as F +from contextlib import contextmanager + +from iopaint.model.anytext.ldm.modules.diffusionmodules.model import Encoder, Decoder +from 
iopaint.model.anytext.ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from iopaint.model.anytext.ldm.util import instantiate_from_config +from iopaint.model.anytext.ldm.modules.ema import LitEma + + +class AutoencoderKL(torch.nn.Module): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ema_decay=None, + learn_logvar=False + ): + super().__init__() + self.learn_logvar = learn_logvar + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + + self.use_ema = ema_decay is not None + if self.use_ema: + self.ema_decay = ema_decay + assert 0. < ema_decay < 1. + self.model_ema = LitEma(self, decay=ema_decay) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, 
reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, postfix=""): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val"+postfix) + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val"+postfix) + + self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list( + self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()) + if self.learn_logvar: + print(f"{self.__class__.__name__}: Learning logvar") + ae_params_list.append(self.loss.logvar) + opt_ae = torch.optim.Adam(ae_params_list, + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + if log_ema or self.use_ema: + with self.ema_scope(): + xrec_ema, posterior_ema = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec_ema.shape[1] > 3 + xrec_ema = self.to_rgb(xrec_ema) + log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample())) + log["reconstructions_ema"] = xrec_ema + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
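+ # min-max rescale the randomly projected result to [-1, 1]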
+ return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x + diff --git a/iopaint/model/anytext/ldm/models/diffusion/__init__.py b/iopaint/model/anytext/ldm/models/diffusion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ldm/models/diffusion/ddim.py b/iopaint/model/anytext/ldm/models/diffusion/ddim.py new file mode 100644 index 0000000..f8bbaff --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/ddim.py @@ -0,0 +1,354 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + dynamic_threshold=None, + ucg_schedule=None, + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): ctmp = ctmp[0] + cbs = ctmp.shape[0] + # cbs = len(ctmp[0]) + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + + elif isinstance(conditioning, list): + for ctmp in conditioning: + if ctmp.shape[0] != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ucg_schedule=ucg_schedule + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, + ucg_schedule=None): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img], "index": [10000]} + time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i, 
step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. - mask) * img + + if ucg_schedule is not None: + assert len(ucg_schedule) == len(time_range) + unconditional_guidance_scale = ucg_schedule[i] + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold) + img, pred_x0 = outs + if callback: + callback(i) + if img_callback: + img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + intermediates['index'].append(index) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, + dynamic_threshold=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + model_output = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + if isinstance(c, dict): + assert isinstance(unconditional_conditioning, dict) + c_in = dict() + for k in c: + if isinstance(c[k], list): + c_in[k] = [torch.cat([ + unconditional_conditioning[k][i], + c[k][i]]) for i in range(len(c[k]))] + elif isinstance(c[k], dict): + c_in[k] = dict() + for key in c[k]: + if isinstance(c[k][key], list): + if not isinstance(c[k][key][0], torch.Tensor): + continue + c_in[k][key] = [torch.cat([ + unconditional_conditioning[k][key][i], + c[k][key][i]]) for i in range(len(c[k][key]))] + else: + c_in[k][key] = torch.cat([ + unconditional_conditioning[k][key], + c[k][key]]) + + else: + c_in[k] = torch.cat([ + unconditional_conditioning[k], + c[k]]) + elif isinstance(c, list): + c_in = list() + assert isinstance(unconditional_conditioning, list) + for i in range(len(c)): + c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) + else: + c_in = torch.cat([unconditional_conditioning, c]) + model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) + + if self.model.parameterization == "v": + e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) + else: + e_t = model_output + + if score_corrector is not None: + assert self.model.parameterization == "eps", 'not implemented' + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding 
to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + if self.model.parameterization != "v": + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + else: + pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) + + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + + if dynamic_threshold is not None: + raise NotImplementedError() + + # direction pointing to x_t + dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, + unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): + num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] + + assert t_enc <= num_reference_steps + num_steps = t_enc + + if use_original_steps: + alphas_next = self.alphas_cumprod[:num_steps] + alphas = self.alphas_cumprod_prev[:num_steps] + else: + alphas_next = self.ddim_alphas[:num_steps] + alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) + + x_next = x0 + intermediates = [] + inter_steps = [] + for i in tqdm(range(num_steps), desc='Encoding Image'): + t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) + if unconditional_guidance_scale == 1.: + noise_pred = self.model.apply_model(x_next, t, c) + else: + assert unconditional_conditioning is not None + e_t_uncond, noise_pred = torch.chunk( + self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), + torch.cat((unconditional_conditioning, c))), 2) + noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) + + xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next + weighted_noise_pred = alphas_next[i].sqrt() * ( + (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred + x_next = xt_weighted + weighted_noise_pred + if return_intermediates and i % ( + num_steps // return_intermediates) == 0 and i < num_steps - 1: + intermediates.append(x_next) + inter_steps.append(i) + elif return_intermediates and i >= num_steps - 2: + intermediates.append(x_next) + inter_steps.append(i) + if callback: callback(i) + + out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} + if return_intermediates: + out.update({'intermediates': intermediates}) + return x_next, out + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * 
noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False, callback=None): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + if callback: callback(i) + return x_dec \ No newline at end of file diff --git a/iopaint/model/anytext/ldm/models/diffusion/ddpm.py b/iopaint/model/anytext/ldm/models/diffusion/ddpm.py new file mode 100644 index 0000000..9f48918 --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/ddpm.py @@ -0,0 +1,2380 @@ +""" +Part of the implementation is borrowed and modified from ControlNet, publicly available at https://github.com/lllyasviel/ControlNet/blob/main/ldm/models/diffusion/ddpm.py +""" + +import torch +import torch.nn as nn +import numpy as np +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager, nullcontext +from functools import partial +import itertools +from tqdm import tqdm +from torchvision.utils import make_grid +from omegaconf import ListConfig + +from iopaint.model.anytext.ldm.util import ( + log_txt_as_img, + exists, + default, + ismap, + isimage, + mean_flat, + count_params, + instantiate_from_config, +) +from iopaint.model.anytext.ldm.modules.ema import LitEma +from iopaint.model.anytext.ldm.modules.distributions.distributions import ( + normal_kl, + DiagonalGaussianDistribution, +) +from iopaint.model.anytext.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + make_beta_schedule, + extract_into_tensor, + noise_like, +) +from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler +import cv2 + + +__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} + +PRINT_DEBUG = False + + +def print_grad(grad): + # print('Gradient:', grad) + # print(grad.shape) + a = grad.max() + b = grad.min() + # print(f'mean={grad.mean():.4f}, max={a:.4f}, min={b:.4f}') + s = 255.0 / (a - b) + c = 255 * (-b / (a - b)) + grad = grad * s + c + # print(f'mean={grad.mean():.4f}, max={grad.max():.4f}, min={grad.min():.4f}') + img = grad[0].permute(1, 2, 0).detach().cpu().numpy() + if img.shape[0] == 512: + cv2.imwrite("grad-img.jpg", img) + elif img.shape[0] == 64: + cv2.imwrite("grad-latent.jpg", img) + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(torch.nn.Module): + # classic DDPM with Gaussian diffusion, in image space + def __init__( + self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + 
load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0.0, + v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1.0, + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0.0, + make_it_fit=False, + ucg_training=None, + reset_ema=False, + reset_num_ema_updates=False, + ): + super().__init__() + assert parameterization in [ + "eps", + "x0", + "v", + ], 'currently only supporting "eps" and "x0" and "v"' + self.parameterization = parameterization + print( + f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" + ) + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + self.make_it_fit = make_it_fit + if reset_ema: + assert exists(ckpt_path) + if ckpt_path is not None: + self.init_from_ckpt( + ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet + ) + if reset_ema: + assert self.use_ema + print( + f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint." 
+ ) + self.model_ema = LitEma(self.model) + if reset_num_ema_updates: + print( + " +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ " + ) + assert self.use_ema + self.model_ema.reset_num_updates() + + self.register_schedule( + given_betas=given_betas, + beta_schedule=beta_schedule, + timesteps=timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + else: + self.register_buffer("logvar", logvar) + + self.ucg_training = ucg_training or dict() + if self.ucg_training: + self.ucg_prng = np.random.RandomState() + + def register_schedule( + self, + given_betas=None, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule( + beta_schedule, + timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + # np.save('1.npy', alphas_cumprod) + alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) + + (timesteps,) = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert ( + alphas_cumprod.shape[0] == self.num_timesteps + ), "alphas have to be defined for each timestep" + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer("betas", to_torch(betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) + self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) + ) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * ( + 1.0 - alphas_cumprod_prev + ) / (1.0 - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer("posterior_variance", to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer( + "posterior_log_variance_clipped", + to_torch(np.log(np.maximum(posterior_variance, 1e-20))), + ) + self.register_buffer( + "posterior_mean_coef1", + to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), + ) + self.register_buffer( + "posterior_mean_coef2", + to_torch( + (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) + ), + ) + + if self.parameterization == "eps": + lvlb_weights = self.betas**2 / ( + 2 + * self.posterior_variance + * to_torch(alphas) + * (1 - self.alphas_cumprod) + ) + elif self.parameterization == "x0": + lvlb_weights = ( + 0.5 + * np.sqrt(torch.Tensor(alphas_cumprod)) + / (2.0 * 1 - torch.Tensor(alphas_cumprod)) + ) + elif self.parameterization == "v": + lvlb_weights = torch.ones_like( + self.betas**2 + / ( + 2 + * self.posterior_variance + * to_torch(alphas) + * (1 - self.alphas_cumprod) + ) + ) + else: + raise NotImplementedError("mu not supported") + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + @torch.no_grad() + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + if self.make_it_fit: + n_params = len( + [ + name + for name, _ in itertools.chain( + self.named_parameters(), self.named_buffers() + ) + ] + ) + for name, param in tqdm( + itertools.chain(self.named_parameters(), self.named_buffers()), + desc="Fitting old weights to new weights", + total=n_params, + ): + if not name in sd: + continue + old_shape = sd[name].shape + new_shape = param.shape + assert len(old_shape) == len(new_shape) + if len(new_shape) > 2: + # we only modify first two axes + assert new_shape[2:] == old_shape[2:] + # assumes first axis corresponds to output dim + if not new_shape == old_shape: + new_param = param.clone() + old_param = sd[name] + if len(new_shape) == 1: + for i in range(new_param.shape[0]): + new_param[i] = old_param[i % old_shape[0]] + elif len(new_shape) >= 2: + for i in range(new_param.shape[0]): + for j in range(new_param.shape[1]): + new_param[i, j] = old_param[ + i % old_shape[0], j % old_shape[1] + ] + + n_used_old = torch.ones(old_shape[1]) + for j in range(new_param.shape[1]): + n_used_old[j % old_shape[1]] += 1 + n_used_new = torch.zeros(new_shape[1]) + for j in range(new_param.shape[1]): + n_used_new[j] = n_used_old[j % old_shape[1]] + + n_used_new = n_used_new[None, :] + while len(n_used_new.shape) < len(new_shape): + n_used_new = n_used_new.unsqueeze(-1) + new_param /= n_used_new + + sd[name] = new_param + + missing, unexpected = ( + self.load_state_dict(sd, strict=False) + if not 
only_model + else self.model.load_state_dict(sd, strict=False) + ) + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys:\n {missing}") + if len(unexpected) > 0: + print(f"\nUnexpected Keys:\n {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor( + self.log_one_minus_alphas_cumprod, t, x_start.shape + ) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + * noise + ) + + def predict_start_from_z_and_v(self, x_t, t, v): + # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v + ) + + def predict_eps_from_z_and_v(self, x_t, t, v): + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) + * x_t + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor( + self.posterior_log_variance_clipped, t, x_t.shape + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1.0, 1.0) + + model_mean, posterior_variance, posterior_log_variance = self.q_posterior( + x_start=x_recon, x_t=x, t=t + ) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance( + x=x, t=t, clip_denoised=clip_denoised + ) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm( + reversed(range(0, self.num_timesteps)), + desc="Sampling t", + total=self.num_timesteps, + ): + img = self.p_sample( + img, + torch.full((b,), i, device=device, 
dtype=torch.long), + clip_denoised=self.clip_denoised, + ) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop( + (batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates, + ) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise + ) + + def get_v(self, x, noise, t): + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x + ) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == "l1": + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == "l2": + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction="none") + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + elif self.parameterization == "v": + target = self.get_v(x_start, noise, t) + else: + raise NotImplementedError( + f"Parameterization {self.parameterization} not yet supported" + ) + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = "train" if self.training else "val" + + loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f"{log_prefix}/loss": loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint( + 0, self.num_timesteps, (x.shape[0],), device=self.device + ).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, "b h w c -> b c h w") + x = x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + for k in self.ucg_training: + p = self.ucg_training[k]["p"] + val = self.ucg_training[k]["val"] + if val is None: + val = "" + for i in range(len(batch[k])): + if self.ucg_prng.choice(2, p=[1 - p, p]): + batch[k][i] = val + + loss, loss_dict = self.shared_step(batch) + + self.log_dict( + loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True + ) + + self.log( + "global_step", + self.global_step, + prog_bar=True, + 
logger=True, + on_step=True, + on_epoch=False, + ) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]["lr"] + self.log( + "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False + ) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict( + loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True + ) + self.log_dict( + loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True + ) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, "n b c h w -> b n c h w") + denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample( + batch_size=N, return_intermediates=True + ) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + + def __init__( + self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + force_null_conditioning=False, + *args, + **kwargs, + ): + self.force_null_conditioning = force_null_conditioning + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs["timesteps"] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = "concat" if concat_mode else "crossattn" + if ( + cond_stage_config == "__is_unconditional__" + and not self.force_null_conditioning + ): + conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + reset_ema = kwargs.pop("reset_ema", False) + reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) + ignore_keys = kwargs.pop("ignore_keys", []) + 
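+ # ckpt_path, reset_ema, reset_num_ema_updates and ignore_keys are popped from
+ # kwargs above so they are not forwarded to the base constructor; the checkpoint
+ # is instead restored below via init_from_ckpt, after the first-stage and
+ # cond-stage models have been instantiated.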
super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer("scale_factor", torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + if reset_ema: + assert self.use_ema + print( + f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint." + ) + self.model_ema = LitEma(self.model) + if reset_num_ema_updates: + print( + " +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ " + ) + assert self.use_ema + self.model_ema.reset_num_updates() + + def make_cond_schedule( + self, + ): + self.cond_ids = torch.full( + size=(self.num_timesteps,), + fill_value=self.num_timesteps - 1, + dtype=torch.long, + ) + ids = torch.round( + torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) + ).long() + self.cond_ids[: self.num_timesteps_cond] = ids + + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if ( + self.scale_by_std + and self.current_epoch == 0 + and self.global_step == 0 + and batch_idx == 0 + and not self.restarted_from_ckpt + ): + assert ( + self.scale_factor == 1.0 + ), "rather not use custom rescaling and std-rescaling simultaneously" + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer("scale_factor", 1.0 / z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule( + self, + given_betas=None, + beta_schedule="linear", + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + super().register_schedule( + given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s + ) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = 
False + else: + assert config != "__is_first_stage__" + assert config != "__is_unconditional__" + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list( + self, samples, desc="", force_no_decoder_quantization=False + ): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append( + self.decode_first_stage( + zd.to(self.device), force_not_quantize=force_no_decoder_quantization + ) + ) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") + denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError( + f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" + ) + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, "encode") and callable( + self.cond_stage_model.encode + ): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min( + torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 + )[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip( + weighting, + self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], + ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip( + L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"], + ) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold( + self, x, kernel_size, stride, uf=1, df=1 + ): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = 
torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting( + kernel_size[0], kernel_size[1], Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict( + kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, + padding=0, + stride=(stride[0] * uf, stride[1] * uf), + ) + fold = torch.nn.Fold( + output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 + ) + + weighting = self.get_weighting( + kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h * uf, w * uf + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) + ) + + elif df > 1 and uf == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict( + kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, + padding=0, + stride=(stride[0] // df, stride[1] // df), + ) + fold = torch.nn.Fold( + output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 + ) + + weighting = self.get_weighting( + kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h // df, w // df + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) + ) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input( + self, + batch, + k, + return_first_stage_outputs=False, + force_c_encode=False, + cond_key=None, + return_original_cond=False, + bs=None, + return_x=False, + mask_k=None, + ): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if mask_k is not None: + mx = super().get_input(batch, mask_k) + if bs is not None: + mx = mx[:bs] + mx = mx.to(self.device) + encoder_posterior = self.encode_first_stage(mx) + mx = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None and not self.force_null_conditioning: + if cond_key is None: + cond_key = self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ["caption", "coordinates_bbox", "txt"]: + xc = batch[cond_key] + elif cond_key in ["class_label", "cls"]: + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, "pos_x": pos_x, "pos_y": pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {"pos_x": pos_x, "pos_y": pos_y} + 
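+ # Assemble the return value below: the latent code z and conditioning c always
+ # come first; optional extras (decoded reconstruction, raw input, original
+ # conditioning, mask latent) are appended depending on the flags passed in.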
out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_x: + out.extend([x]) + if return_original_cond: + out.append(xc) + if mask_k: + out.append(mx) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, "b h w c -> b c h w").contiguous() + + z = 1.0 / self.scale_factor * z + return self.first_stage_model.decode(z) + + def decode_first_stage_grad(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, "b h w c -> b c h w").contiguous() + + z = 1.0 / self.scale_factor * z + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint( + 0, self.num_timesteps, (x.shape[0],), device=self.device + ).long() + # t = torch.randint(500, 501, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def apply_model(self, x_noisy, t, cond, return_ids=False): + if isinstance(cond, dict): + # hybrid case, cond is expected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = ( + "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" + ) + cond = {key: cond} + + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - pred_xstart + ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. 
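+ Concretely, this evaluates KL( q(x_T | x_0) || N(0, I) ) at the final
+ timestep, averages it over the non-batch dimensions and divides by log(2)
+ to convert nats to bits.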
+ """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl( + mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 + ) + return mean_flat(kl_prior) / np.log(2.0) + + def p_mean_variance( + self, + x, + c, + t, + clip_denoised: bool, + return_codebook_ids=False, + quantize_denoised=False, + return_x0=False, + score_corrector=None, + corrector_kwargs=None, + ): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score( + self, model_out, x, t, c, **corrector_kwargs + ) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1.0, 1.0) + if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior( + x_start=x_recon, x_t=x, t=t + ) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample( + self, + x, + c, + t, + clip_denoised=False, + repeat_noise=False, + return_codebook_ids=False, + quantize_denoised=False, + return_x0=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + ): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance( + x=x, + c=c, + t=t, + clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + ) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * ( + 0.5 * model_log_variance + ).exp() * noise, logits.argmax(dim=1) + if return_x0: + return ( + model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, + x0, + ) + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising( + self, + cond, + shape, + verbose=True, + callback=None, + quantize_denoised=False, + img_callback=None, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + batch_size=None, + x_T=None, + start_T=None, + log_every_t=None, + ): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size 
= shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = { + key: cond[key][:batch_size] + if not isinstance(cond[key], list) + else list(map(lambda x: x[:batch_size], cond[key])) + for key in cond + } + else: + cond = ( + [c[:batch_size] for c in cond] + if isinstance(cond, list) + else cond[:batch_size] + ) + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = ( + tqdm( + reversed(range(0, timesteps)), + desc="Progressive Generation", + total=timesteps, + ) + if verbose + else reversed(range(0, timesteps)) + ) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != "hybrid" + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample( + img, + cond, + ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, + return_x0=True, + temperature=temperature[i], + noise_dropout=noise_dropout, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + ) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1.0 - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop( + self, + cond, + shape, + return_intermediates=False, + x_T=None, + verbose=True, + callback=None, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + start_T=None, + log_every_t=None, + ): + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = ( + tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) + if verbose + else reversed(range(0, timesteps)) + ) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != "hybrid" + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample( + img, + cond, + ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, + ) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1.0 - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample( + self, + cond, + batch_size=16, + return_intermediates=False, + x_T=None, + verbose=True, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + shape=None, + **kwargs, + ): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: 
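+ # Trim the conditioning to the requested batch size; cond may be a dict of
+ # tensors/lists (hybrid conditioning), a plain list of tensors, or a tensor.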
+ if isinstance(cond, dict): + cond = { + key: cond[key][:batch_size] + if not isinstance(cond[key], list) + else list(map(lambda x: x[:batch_size], cond[key])) + for key in cond + } + else: + cond = ( + [c[:batch_size] for c in cond] + if isinstance(cond, list) + else cond[:batch_size] + ) + return self.p_sample_loop( + cond, + shape, + return_intermediates=return_intermediates, + x_T=x_T, + verbose=verbose, + timesteps=timesteps, + quantize_denoised=quantize_denoised, + mask=mask, + x0=x0, + ) + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates = ddim_sampler.sample( + ddim_steps, batch_size, shape, cond, verbose=False, **kwargs + ) + + else: + samples, intermediates = self.sample( + cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs + ) + + return samples, intermediates + + @torch.no_grad() + def get_unconditional_conditioning(self, batch_size, null_label=None): + if null_label is not None: + xc = null_label + if isinstance(xc, ListConfig): + xc = list(xc) + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + if hasattr(xc, "to"): + xc = xc.to(self.device) + c = self.get_learned_conditioning(xc) + else: + if self.cond_stage_key in ["class_label", "cls"]: + xc = self.cond_stage_model.get_unconditional_conditioning( + batch_size, device=self.device + ) + return self.get_learned_conditioning(xc) + else: + raise NotImplementedError("todo") + if isinstance(c, list): # in case the encoder gives us a list + for i in range(len(c)): + c[i] = repeat(c[i], "1 ... -> b ...", b=batch_size).to(self.device) + else: + c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) + return c + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=50, + ddim_eta=0.0, + return_keys=None, + quantize_denoised=True, + inpaint=True, + plot_denoise_rows=False, + plot_progressive_rows=True, + plot_diffusion_rows=True, + unconditional_guidance_scale=1.0, + unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs, + ): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N, + ) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch[self.cond_stage_key], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif self.cond_stage_key in ["class_label", "cls"]: + try: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch["human_label"], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + except KeyError: + # probably no "human_label" in batch + pass + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if ( + quantize_denoised + and not isinstance(self.first_stage_model, AutoencoderKL) + and not isinstance(self.first_stage_model, IdentityFirstStage) + ): + # also display when quantizing x0 while sampling + with ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + quantize_denoised=True, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if unconditional_guidance_scale > 1.0: + uc = self.get_unconditional_conditioning(N, 
unconditional_guidance_label) + if self.model.conditioning_key == "crossattn-adm": + uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]} + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[ + f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" + ] = x_samples_cfg + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 + mask = mask[:, None, ...] + with ema_scope("Plotting Inpaint"): + samples, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + eta=ddim_eta, + ddim_steps=ddim_steps, + x0=z[:N], + mask=mask, + ) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + mask = 1.0 - mask + with ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + eta=ddim_eta, + ddim_steps=ddim_steps, + x0=z[:N], + mask=mask, + ) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising( + c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N, + ) + prog_row = self._get_denoise_row_from_list( + progressives, desc="Progressive Generation" + ) + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print("Diffusion model optimizing logvar") + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert "target" in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + "scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, + } + ] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 + return x + + +class DiffusionWrapper(torch.nn.Module): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.sequential_cross_attn = diff_model_config.pop( + "sequential_crossattn", False + ) + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [ + None, + "concat", + "crossattn", + "hybrid", + "adm", + "hybrid-adm", + "crossattn-adm", + ] + + def forward( + self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None + ): + if self.conditioning_key is None: 
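+ # Unconditional case; the branches below dispatch on conditioning_key:
+ # "concat" stacks the conditioning along the channel axis, "crossattn" passes
+ # it as attention context, "adm" passes it as the vector embedding y, and the
+ # hybrid/adm variants combine these mechanisms.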
+ out = self.diffusion_model(x, t) + elif self.conditioning_key == "concat": + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == "crossattn": + if not self.sequential_cross_attn: + cc = torch.cat(c_crossattn, 1) + else: + cc = c_crossattn + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == "hybrid": + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == "hybrid-adm": + assert c_adm is not None + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc, y=c_adm) + elif self.conditioning_key == "crossattn-adm": + assert c_adm is not None + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc, y=c_adm) + elif self.conditioning_key == "adm": + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class LatentUpscaleDiffusion(LatentDiffusion): + def __init__( + self, + *args, + low_scale_config, + low_scale_key="LR", + noise_level_key=None, + **kwargs, + ): + super().__init__(*args, **kwargs) + # assumes that neither the cond_stage nor the low_scale_model contain trainable params + assert not self.cond_stage_trainable + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key + self.noise_level_key = noise_level_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): + if not log_mode: + z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) + else: + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + x_low = batch[self.low_scale_key][:bs] + x_low = rearrange(x_low, "b h w c -> b c h w") + x_low = x_low.to(memory_format=torch.contiguous_format).float() + zx, noise_level = self.low_scale_model(x_low) + if self.noise_level_key is not None: + # get noise level from batch instead, e.g. 
when extracting a custom noise level for bsr + raise NotImplementedError("TODO") + + all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} + if log_mode: + # TODO: maybe disable if too expensive + x_low_rec = self.low_scale_model.decode(zx) + return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level + return z, all_conds + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=200, + ddim_eta=1.0, + return_keys=None, + plot_denoise_rows=False, + plot_progressive_rows=True, + plot_diffusion_rows=True, + unconditional_guidance_scale=1.0, + unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs, + ): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input( + batch, self.first_stage_key, bs=N, log_mode=True + ) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + log["x_lr"] = x_low + log[ + f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}" + ] = x_low_rec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch[self.cond_stage_key], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif self.cond_stage_key in ["class_label", "cls"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch["human_label"], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_tmp = self.get_unconditional_conditioning( + N, unconditional_guidance_label + ) + # TODO explore better "unconditional" choices for the other keys + # maybe guide away from empty text label and highest noise level and maximally degraded zx? + uc = dict() + for k in c: + if k == "c_crossattn": + assert isinstance(c[k], list) and len(c[k]) == 1 + uc[k] = [uc_tmp] + elif k == "c_adm": # todo: only run with text-based guidance? 
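+ # Reuse the conditional noise-level embedding for the unconditional branch,
+ # so that guidance acts only through the remaining conditioning keys.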
+ assert isinstance(c[k], torch.Tensor) + # uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level + uc[k] = c[k] + elif isinstance(c[k], list): + uc[k] = [c[k][i] for i in range(len(c[k]))] + else: + uc[k] = c[k] + + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[ + f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" + ] = x_samples_cfg + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising( + c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N, + ) + prog_row = self._get_denoise_row_from_list( + progressives, desc="Progressive Generation" + ) + log["progressive_row"] = prog_row + + return log + + +class LatentFinetuneDiffusion(LatentDiffusion): + """ + Basis for different finetunas, such as inpainting or depth2image + To disable finetuning mode, set finetune_keys to None + """ + + def __init__( + self, + concat_keys: tuple, + finetune_keys=( + "model.diffusion_model.input_blocks.0.0.weight", + "model_ema.diffusion_modelinput_blocks00weight", + ), + keep_finetune_dims=4, + # if model was trained without concat mode before and we would like to keep these channels + c_concat_log_start=None, # to log reconstruction of c_concat codes + c_concat_log_end=None, + *args, + **kwargs, + ): + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", list()) + super().__init__(*args, **kwargs) + self.finetune_keys = finetune_keys + self.concat_keys = concat_keys + self.keep_dims = keep_finetune_dims + self.c_concat_log_start = c_concat_log_start + self.c_concat_log_end = c_concat_log_end + if exists(self.finetune_keys): + assert exists(ckpt_path), "can only finetune from a given checkpoint" + if exists(ckpt_path): + self.init_from_ckpt(ckpt_path, ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + + # make it explicit, finetune by including extra input channels + if exists(self.finetune_keys) and k in self.finetune_keys: + new_entry = None + for name, param in self.named_parameters(): + if name in self.finetune_keys: + print( + f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only" + ) + new_entry = torch.zeros_like(param) # zero init + assert exists(new_entry), "did not find matching parameter to modify" + new_entry[:, : self.keep_dims, ...] 
= sd[k] + sd[k] = new_entry + + missing, unexpected = ( + self.load_state_dict(sd, strict=False) + if not only_model + else self.model.load_state_dict(sd, strict=False) + ) + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=200, + ddim_eta=1.0, + return_keys=None, + quantize_denoised=True, + inpaint=True, + plot_denoise_rows=False, + plot_progressive_rows=True, + plot_diffusion_rows=True, + unconditional_guidance_scale=1.0, + unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs, + ): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input( + batch, self.first_stage_key, bs=N, return_first_stage_outputs=True + ) + c_cat, c = c["c_concat"][0], c["c_crossattn"][0] + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch[self.cond_stage_key], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif self.cond_stage_key in ["class_label", "cls"]: + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), + batch["human_label"], + size=x.shape[2] // 25, + ) + log["conditioning"] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if not (self.c_concat_log_start is None and self.c_concat_log_end is None): + log["c_concat_decoded"] = self.decode_first_stage( + c_cat[:, self.c_concat_log_start : self.c_concat_log_end] + ) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log( + cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_cross = self.get_unconditional_conditioning( + N, unconditional_guidance_label + ) + uc_cat = c_cat + uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} + with ema_scope("Sampling with 
classifier-free guidance"): + samples_cfg, _ = self.sample_log( + cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc_full, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[ + f"samples_cfg_scale_{unconditional_guidance_scale:.2f}" + ] = x_samples_cfg + + return log + + +class LatentInpaintDiffusion(LatentFinetuneDiffusion): + """ + can either run as pure inpainting model (only concat mode) or with mixed conditionings, + e.g. mask as concat and text via cross-attn. + To disable finetuning mode, set finetune_keys to None + """ + + def __init__( + self, + concat_keys=("mask", "masked_image"), + masked_image_key="masked_image", + *args, + **kwargs, + ): + super().__init__(concat_keys, *args, **kwargs) + self.masked_image_key = masked_image_key + assert self.masked_image_key in concat_keys + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for inpainting" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + c_cat = list() + for ck in self.concat_keys: + cc = ( + rearrange(batch[ck], "b h w c -> b c h w") + .to(memory_format=torch.contiguous_format) + .float() + ) + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + bchw = z.shape + if ck != self.masked_image_key: + cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) + else: + cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs) + log["masked_image"] = ( + rearrange(args[0]["masked_image"], "b h w c -> b c h w") + .to(memory_format=torch.contiguous_format) + .float() + ) + return log + + +class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion): + """ + condition on monocular depth estimation + """ + + def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs): + super().__init__(concat_keys=concat_keys, *args, **kwargs) + self.depth_model = instantiate_from_config(depth_stage_config) + self.depth_stage_key = concat_keys[0] + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for depth2img" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + assert len(self.concat_keys) == 1 + c_cat = list() + for ck in self.concat_keys: + cc = batch[ck] + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + cc = self.depth_model(cc) + cc = torch.nn.functional.interpolate( + cc, + size=z.shape[2:], + mode="bicubic", + align_corners=False, + ) + + depth_min, depth_max = 
torch.amin( + cc, dim=[1, 2, 3], keepdim=True + ), torch.amax(cc, dim=[1, 2, 3], keepdim=True) + cc = 2.0 * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.0 + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super().log_images(*args, **kwargs) + depth = self.depth_model(args[0][self.depth_stage_key]) + depth_min, depth_max = torch.amin( + depth, dim=[1, 2, 3], keepdim=True + ), torch.amax(depth, dim=[1, 2, 3], keepdim=True) + log["depth"] = 2.0 * (depth - depth_min) / (depth_max - depth_min) - 1.0 + return log + + +class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion): + """ + condition on low-res image (and optionally on some spatial noise augmentation) + """ + + def __init__( + self, + concat_keys=("lr",), + reshuffle_patch_size=None, + low_scale_config=None, + low_scale_key=None, + *args, + **kwargs, + ): + super().__init__(concat_keys=concat_keys, *args, **kwargs) + self.reshuffle_patch_size = reshuffle_patch_size + self.low_scale_model = None + if low_scale_config is not None: + print("Initializing a low-scale model") + assert exists(low_scale_key) + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for upscaling-ft" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + assert len(self.concat_keys) == 1 + # optionally make spatial noise_level here + c_cat = list() + noise_level = None + for ck in self.concat_keys: + cc = batch[ck] + cc = rearrange(cc, "b h w c -> b c h w") + if exists(self.reshuffle_patch_size): + assert isinstance(self.reshuffle_patch_size, int) + cc = rearrange( + cc, + "b c (p1 h) (p2 w) -> b (p1 p2 c) h w", + p1=self.reshuffle_patch_size, + p2=self.reshuffle_patch_size, + ) + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + if exists(self.low_scale_model) and ck == self.low_scale_key: + cc, noise_level = self.low_scale_model(cc) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + if exists(noise_level): + all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level} + else: + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super().log_images(*args, **kwargs) + log["lr"] = rearrange(args[0]["lr"], "b h w c -> b c h w") + return log diff --git a/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py b/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py new file mode 100644 index 0000000..7427f38 --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/__init__.py @@ -0,0 +1 @@ +from .sampler import 
DPMSolverSampler \ No newline at end of file diff --git a/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py b/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py new file mode 100644 index 0000000..095e5ba --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/dpm_solver.py @@ -0,0 +1,1154 @@ +import torch +import torch.nn.functional as F +import math +from tqdm import tqdm + + +class NoiseScheduleVP: + def __init__( + self, + schedule='discrete', + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20., + ): + """Create a wrapper class for the forward SDE (VP type). + *** + Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. + We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. + *** + The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). + We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). + Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have: + log_alpha_t = self.marginal_log_mean_coeff(t) + sigma_t = self.marginal_std(t) + lambda_t = self.marginal_lambda(t) + Moreover, as lambda(t) is an invertible function, we also support its inverse function: + t = self.inverse_lambda(lambda_t) + =============================================================== + We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). + 1. For discrete-time DPMs: + For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: + t_i = (i + 1) / N + e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. + We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. + Args: + betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) + alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) + Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. + **Important**: Please pay special attention for the args for `alphas_cumprod`: + The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that + q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). + Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have + alpha_{t_n} = \sqrt{\hat{alpha_n}}, + and + log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). + 2. For continuous-time DPMs: + We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise + schedule are the default settings in DDPM and improved-DDPM: + Args: + beta_min: A `float` number. The smallest beta for the linear schedule. + beta_max: A `float` number. The largest beta for the linear schedule. + cosine_s: A `float` number. The hyperparameter in the cosine schedule. + cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. + T: A `float` number. The ending time of the forward process. + =============================================================== + Args: + schedule: A `str`. The noise schedule of the forward SDE. 
'discrete' for discrete-time DPMs, + 'linear' or 'cosine' for continuous-time DPMs. + Returns: + A wrapper object of the forward SDE (VP type). + + =============================================================== + Example: + # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', betas=betas) + # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + # For continuous-time DPMs (VPSDE), linear schedule: + >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) + """ + + if schedule not in ['discrete', 'linear', 'cosine']: + raise ValueError( + "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format( + schedule)) + + self.schedule = schedule + if schedule == 'discrete': + if betas is not None: + log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) + else: + assert alphas_cumprod is not None + log_alphas = 0.5 * torch.log(alphas_cumprod) + self.total_N = len(log_alphas) + self.T = 1. + self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * ( + 1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), + self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. + """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. 
* lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0 ** 2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), + torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * ( + 1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. + We support four types of the diffusion model by setting `model_type`: + 1. "noise": noise prediction model. (Trained by predicting noise). + 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). + 3. "v": velocity prediction model. (Trained by predicting the velocity). + The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. + [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." + arXiv preprint arXiv:2202.00512 (2022). + [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." + arXiv preprint arXiv:2210.02303 (2022). + + 4. "score": marginal score function. (Trained by denoising score matching). + Note that the score function and the noise prediction model follows a simple relationship: + ``` + noise(x_t, t) = -sigma_t * score(x_t, t) + ``` + We support three types of guided sampling by DPMs by setting `guidance_type`: + 1. "uncond": unconditional sampling by DPMs. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + The input `classifier_fn` has the following format: + `` + classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) + `` + [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," + in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. + 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. + The input `model` has the following format: + `` + model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score + `` + And if cond == `unconditional_condition`, the model output is the unconditional DPM output. + [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." + arXiv preprint arXiv:2207.12598 (2022). + + The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) + or continuous-time labels (i.e. epsilon to T). 
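+    For instance, the classifier-free guided, noise-parameterized case used by `sampler.py` in this patch
+    can be wrapped as follows (illustrative sketch only; `ldm_model`, `ns`, `cond`, `uncond` and the scale
+    value are placeholders, while the lambda mirrors how `sampler.py` calls `apply_model`):
+    ``
+    model_fn = model_wrapper(
+        lambda x, t, c: ldm_model.apply_model(x, t, c),
+        noise_schedule=ns,
+        model_type="noise",
+        guidance_type="classifier-free",
+        condition=cond,
+        unconditional_condition=uncond,
+        guidance_scale=7.5,
+    )
+    ``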
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: + `` + def model_fn(x, t_continuous) -> noise: + t_input = get_model_input_time(t_continuous) + return noise_pred(model, x, t_input, **model_kwargs) + `` + where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. + =============================================================== + Args: + model: A diffusion model with the corresponding format described above. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + model_type: A `str`. The parameterization type of the diffusion model. + "noise" or "x_start" or "v" or "score". + model_kwargs: A `dict`. A dict for the other inputs of the model function. + guidance_type: A `str`. The type of the guidance for sampling. + "uncond" or "classifier" or "classifier-free". + condition: A pytorch tensor. The condition for the guided sampling. + Only used for "classifier" or "classifier-free" guidance type. + unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. + Only used for "classifier-free" guidance type. + guidance_scale: A `float`. The scale for the guided sampling. + classifier_fn: A classifier function. Only used for the classifier guidance. + classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. + Returns: + A noise prediction model that accepts the noised data and the continuous time as the inputs. + """ + + def get_model_input_time(t_continuous): + """ + Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. + For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. + For continuous-time DPMs, we just use `t_continuous`. + """ + if noise_schedule.schedule == 'discrete': + return (t_continuous - 1. / noise_schedule.total_N) * 1000. + else: + return t_continuous + + def noise_pred_fn(x, t_continuous, cond=None): + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + t_input = get_model_input_time(t_continuous) + if cond is None: + output = model(x, t_input, **model_kwargs) + else: + output = model(x, t_input, cond, **model_kwargs) + if model_type == "noise": + return output + elif model_type == "x_start": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) + elif model_type == "v": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x + elif model_type == "score": + sigma_t = noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return -expand_dims(sigma_t, dims) * output + + def cond_grad_fn(x, t_input): + """ + Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). + """ + with torch.enable_grad(): + x_in = x.detach().requires_grad_(True) + log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) + return torch.autograd.grad(log_prob.sum(), x_in)[0] + + def model_fn(x, t_continuous): + """ + The noise predicition model function that is used for DPM-Solver. 
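+        For `guidance_type == "classifier"` the prediction is shifted by
+        `-guidance_scale * sigma_t * cond_grad`, where `cond_grad = nabla_x log p_t(condition | x_t)`
+        comes from `cond_grad_fn` above; for `"classifier-free"` the conditional and unconditional
+        branches are combined as `noise_uncond + guidance_scale * (noise - noise_uncond)`, as
+        implemented below.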
+ """ + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1. or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v"] + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): + """Construct a DPM-Solver. + We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). + If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. + thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. + max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. + + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = model_fn + self.noise_schedule = noise_schedule + self.predict_x0 = predict_x0 + self.thresholding = thresholding + self.max_val = max_val + + def noise_prediction_fn(self, x, t): + """ + Return the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the data prediction model (with thresholding). + """ + noise = self.noise_prediction_fn(x, t) + dims = x.dim() + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) + if self.thresholding: + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. 
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.predict_x0: + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: A `int`. The total number of the spacing of the time steps. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,). + """ + if skip_type == 'logSNR': + lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) + lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) + logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) + return self.noise_schedule.inverse_lambda(logSNR_steps) + elif skip_type == 'time_uniform': + return torch.linspace(t_T, t_0, N + 1).to(device) + elif skip_type == 'time_quadratic': + t_order = 2 + t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device) + return t + else: + raise ValueError( + "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + + def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): + """ + Get the order of each step for sampling by the singlestep DPM-Solver. + We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". + Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: + - If order == 1: + We take `steps` of DPM-Solver-1 (i.e. DDIM). + - If order == 2: + - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of DPM-Solver-2. + - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) 
+ t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. + """ + if order == 3: + K = steps // 3 + 1 + if steps % 3 == 0: + orders = [3, ] * (K - 2) + [2, 1] + elif steps % 3 == 1: + orders = [3, ] * (K - 1) + [1] + else: + orders = [3, ] * (K - 1) + [2] + elif order == 2: + if steps % 2 == 0: + K = steps // 2 + orders = [2, ] * K + else: + K = steps // 2 + 1 + orders = [2, ] * (K - 1) + [1] + elif order == 1: + K = 1 + orders = [1, ] * steps + else: + raise ValueError("'order' must be '1' or '2' or '3'.") + if skip_type == 'logSNR': + # To reproduce the results in DPM-Solver paper + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) + else: + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ + torch.cumsum(torch.tensor([0, ] + orders)).to(device)] + return timesteps_outer, orders + + def denoise_to_zero_fn(self, x, s): + """ + Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. + """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + if self.predict_x0: + phi_1 = torch.expm1(-h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + else: + phi_1 = torch.expm1(h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + + def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, + solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-2 from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the second-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. 
If true, also return the model value at time `s` and `s1` (the intermediate time). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff( + s1), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * ( + model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None, + return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). 
+ If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 1. / 3. + if r2 is None: + r2 = 2. / 3. + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff( + s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std( + s2), ns.marginal_std(t) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. + phi_2 = phi_1 / h + 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(sigma_s2 / sigma_s, dims) * x + - expand_dims(alpha_s2 * phi_12, dims) * model_s + + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + expand_dims(alpha_t * phi_2, dims) * D1 + - expand_dims(alpha_t * phi_3, dims) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. + phi_2 = phi_1 / h - 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x + - expand_dims(sigma_s2 * phi_12, dims) * model_s + - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. 
/ r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - expand_dims(sigma_t * phi_2, dims) * D1 + - expand_dims(sigma_t * phi_3, dims) * D2 + ) + + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + ns = self.noise_schedule + dims = x.dim() + model_prev_1, model_prev_0 = model_prev_list + t_prev_1, t_prev_0 = t_prev_list + lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( + t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + if self.predict_x0: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 + ) + else: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. 
The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda( + t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) + D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) + D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) + if self.predict_x0: + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 + - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2 + ) + else: + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 + - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2 + ) + return x_t + + def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, + r2=None): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
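+        When called from `sample` with `method='singlestep'`, `r1` and `r2` are the fractions of the step
+        taken in lambda (half-logSNR) space; left as `None`, they default to 1/2 for order 2 and to
+        1/3 and 2/3 for order 3 (see the update functions above). E.g.
+        ``
+        x_t = dpm_solver.singlestep_dpm_solver_update(x, vec_s, vec_t, order=2, r1=0.5)
+        ``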
+ """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, + solver_type=solver_type, r1=r1) + elif order == 3: + return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, + solver_type=solver_type, r1=r1, r2=r2) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, + solver_type='dpm_solver'): + """ + The adaptive step size solver based on singlestep DPM-Solver. + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. 
+ """ + ns = self.noise_schedule + s = t_T * torch.ones((x.shape[0],)).to(x) + lambda_s = ns.marginal_lambda(s) + lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) + h = h_init * torch.ones_like(s).to(x) + x_prev = x + nfe = 0 + if order == 2: + r1 = 0.5 + lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, + solver_type=solver_type, + **kwargs) + elif order == 3: + r1, r2 = 1. / 3., 2. / 3. + lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, + return_intermediate=True, + solver_type=solver_type) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, + solver_type=solver_type, + **kwargs) + else: + raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) + while torch.abs((s - t_0)).mean() > t_err: + t = ns.inverse_lambda(lambda_s + h) + x_lower, lower_noise_kwargs = lower_update(x, s, t) + x_higher = higher_update(x, s, t, **lower_noise_kwargs) + delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) + norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) + E = norm_fn((x_higher - x_lower) / delta).max() + if torch.all(E <= 1.): + x = x_higher + s = t + x_prev = x_lower + lambda_s = ns.marginal_lambda(s) + h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) + nfe += order + print('adaptive solver nfe', nfe) + return x + + def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', + method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', + atol=0.0078, rtol=0.05, + ): + """ + Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. + ===================================================== + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. 
+ Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. + - If `order` == 3: + - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. + - 'singlestep_fixed': + Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). + We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. + - 'adaptive': + Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). + We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. + You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs + (NFE) and the sample quality. + - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. + - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. + ===================================================== + Some advices for choosing the algorithm: + - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: + Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, + skip_type='time_uniform', method='singlestep') + - For **guided sampling with large guidance scale** by DPMs: + Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, + skip_type='time_uniform', method='multistep') + We support three types of `skip_type`: + - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** + - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. + - 'time_quadratic': quadratic time for the time steps. + ===================================================== + Args: + x: A pytorch tensor. The initial value at time `t_start` + e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. + steps: A `int`. The total number of function evaluations (NFE). + t_start: A `float`. The starting time of the sampling. + If `T` is None, we use self.noise_schedule.T (default is 1.0). + t_end: A `float`. The ending time of the sampling. + If `t_end` is None, we use 1. / self.noise_schedule.total_N. + e.g. if total_N == 1000, we have `t_end` == 1e-3. + For discrete-time DPMs: + - We recommend `t_end` == 1. / self.noise_schedule.total_N. + For continuous-time DPMs: + - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. + order: A `int`. The order of DPM-Solver. + skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. + method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. + denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. + Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). 
+ This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and + score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID + for diffusion models sampling by diffusion SDEs for low-resolutional images + (such as CIFAR-10). However, we observed that such trick does not matter for + high-resolutional images. As it needs an additional NFE, we do not recommend + it for high-resolutional images. + lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. + Only valid for `method=multistep` and `steps < 15`. We empirically find that + this trick is a key to stabilizing the sampling by DPM-Solver with very few steps + (especially for steps <= 10). So we recommend to set it to be `True`. + solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. + atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + """ + t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + device = x.device + if method == 'adaptive': + with torch.no_grad(): + x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, + solver_type=solver_type) + elif method == 'multistep': + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + with torch.no_grad(): + vec_t = timesteps[0].expand((x.shape[0])) + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + # Init the first `order` values by lower order multistep DPM-Solver. + for init_order in tqdm(range(1, order), desc="DPM init order"): + vec_t = timesteps[init_order].expand(x.shape[0]) + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, + solver_type=solver_type) + model_prev_list.append(self.model_fn(x, vec_t)) + t_prev_list.append(vec_t) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in tqdm(range(order, steps + 1), desc="DPM multistep"): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, + solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. 
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, + skip_type=skip_type, + t_T=t_T, t_0=t_0, + device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order, ] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), + N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. + """ + N, K = x.shape[0], xp.shape[1] + all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) + sorted_all_x, x_indices = torch.sort(all_x, dim=2) + x_idx = torch.argmin(x_indices, dim=2) + cand_start_idx = x_idx - 1 + start_idx = torch.where( + torch.eq(x_idx, 0), + torch.tensor(1, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) + start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) + end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) + start_idx2 = torch.where( + torch.eq(x_idx, 0), + torch.tensor(0, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) + start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) + end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) + cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) + return cand + + +def expand_dims(v, dims): + """ + Expand the tensor `v` to the dim `dims`. + Args: + `v`: a PyTorch tensor with shape [N]. + `dim`: a `int`. + Returns: + a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
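+    Example:
+        >>> expand_dims(torch.tensor([1., 2., 3.]), 4).shape
+        torch.Size([3, 1, 1, 1])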
+ """ + return v[(...,) + (None,) * (dims - 1)] \ No newline at end of file diff --git a/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py b/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py new file mode 100644 index 0000000..7d137b8 --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/dpm_solver/sampler.py @@ -0,0 +1,87 @@ +"""SAMPLING ONLY.""" +import torch + +from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver + + +MODEL_TYPES = { + "eps": "noise", + "v": "v" +} + + +class DPMSolverSampler(object): + def __init__(self, model, **kwargs): + super().__init__() + self.model = model + to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) + self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') + + device = self.model.betas.device + if x_T is None: + img = torch.randn(size, device=device) + else: + img = x_T + + ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + + model_fn = model_wrapper( + lambda x, t, c: self.model.apply_model(x, t, c), + ns, + model_type=MODEL_TYPES[self.model.parameterization], + guidance_type="classifier-free", + condition=conditioning, + unconditional_condition=unconditional_conditioning, + guidance_scale=unconditional_guidance_scale, + ) + + dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) + x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) + + return x.to(device), None \ No newline at end of file diff --git a/iopaint/model/anytext/ldm/models/diffusion/plms.py b/iopaint/model/anytext/ldm/models/diffusion/plms.py new file mode 100644 index 0000000..5f35d55 --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/plms.py @@ -0,0 +1,244 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like +from iopaint.model.anytext.ldm.models.diffusion.sampling_util import norm_thresholding + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule 
+ + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ dynamic_threshold=None, + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + dynamic_threshold=dynamic_threshold, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, + dynamic_threshold=None): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next, + dynamic_threshold=dynamic_threshold) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, + dynamic_threshold=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + if dynamic_threshold is not None: + pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/iopaint/model/anytext/ldm/models/diffusion/sampling_util.py b/iopaint/model/anytext/ldm/models/diffusion/sampling_util.py new file mode 100644 index 0000000..7eff02b --- /dev/null +++ b/iopaint/model/anytext/ldm/models/diffusion/sampling_util.py @@ -0,0 +1,22 @@ +import torch +import numpy as np + + +def append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions. + From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" + dims_to_append = target_dims - x.ndim + if dims_to_append < 0: + raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') + return x[(...,) + (None,) * dims_to_append] + + +def norm_thresholding(x0, value): + s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) + return x0 * (value / s) + + +def spatial_norm_thresholding(x0, value): + # b c h w + s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) + return x0 * (value / s) \ No newline at end of file diff --git a/iopaint/model/anytext/ldm/modules/__init__.py b/iopaint/model/anytext/ldm/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ldm/modules/attention.py b/iopaint/model/anytext/ldm/modules/attention.py new file mode 100644 index 0000000..df92aa7 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/attention.py @@ -0,0 +1,360 @@ +from inspect import isfunction +import math +import torch +import torch.nn.functional as F +from torch import nn, einsum +from einops import rearrange, repeat +from typing import Optional, Any + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import checkpoint + + +# CrossAttn precision handling +import os + +_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32") + + +def exists(val): + return val is not None + + +def uniq(arr): + return {el: True for el in arr}.keys() + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def max_neg_value(t): + return -torch.finfo(t.dtype).max + + +def init_(tensor): + dim = tensor.shape[-1] + std = 1 / math.sqrt(dim) + tensor.uniform_(-std, std) + return tensor + + +# feedforward +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, 
dim_out=None, mult=4, glu=False, dropout=0.0): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = ( + nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) + if not glu + else GEGLU(dim, inner_dim) + ) + + self.net = nn.Sequential( + project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm( + num_groups=32, num_channels=in_channels, eps=1e-6, affine=True + ) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = rearrange(q, "b c h w -> b (h w) c") + k = rearrange(k, "b c h w -> b c (h w)") + w_ = torch.einsum("bij,bjk->bik", q, k) + + w_ = w_ * (int(c) ** (-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, "b c h w -> b c (h w)") + w_ = rearrange(w_, "b i j -> b j i") + h_ = torch.einsum("bij,bjk->bik", v, w_) + h_ = rearrange(h_, "b c (h w) -> b c h w", h=h) + h_ = self.proj_out(h_) + + return x + h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head**-0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v)) + + # force cast to fp32 to avoid overflowing + if _ATTN_PRECISION == "fp32": + with torch.autocast(enabled=False, device_type="cuda"): + q, k = q.float(), k.float() + sim = einsum("b i d, b j d -> b i j", q, k) * self.scale + else: + sim = einsum("b i d, b j d -> b i j", q, k) * self.scale + + del q, k + + if exists(mask): + mask = rearrange(mask, "b ... 
-> b (...)") + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, "b j -> (b h) () j", h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + sim = sim.softmax(dim=-1) + + out = einsum("b i j, b j d -> b i d", sim, v) + out = rearrange(out, "(b h) n d -> b n (h d)", h=h) + return self.to_out(out) + + +class SDPACrossAttention(CrossAttention): + def forward(self, x, context=None, mask=None): + batch_size, sequence_length, inner_dim = x.shape + + if mask is not None: + mask = self.prepare_attention_mask(mask, sequence_length, batch_size) + mask = mask.view(batch_size, self.heads, -1, mask.shape[-1]) + + h = self.heads + q_in = self.to_q(x) + context = default(context, x) + + k_in = self.to_k(context) + v_in = self.to_v(context) + + head_dim = inner_dim // h + q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + + del q_in, k_in, v_in + + dtype = q.dtype + if _ATTN_PRECISION == "fp32": + q, k, v = q.float(), k.float(), v.float() + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + hidden_states = torch.nn.functional.scaled_dot_product_attention( + q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape( + batch_size, -1, h * head_dim + ) + hidden_states = hidden_states.to(dtype) + + # linear proj + hidden_states = self.to_out[0](hidden_states) + # dropout + hidden_states = self.to_out[1](hidden_states) + return hidden_states + + +class BasicTransformerBlock(nn.Module): + def __init__( + self, + dim, + n_heads, + d_head, + dropout=0.0, + context_dim=None, + gated_ff=True, + checkpoint=True, + disable_self_attn=False, + ): + super().__init__() + + if hasattr(torch.nn.functional, "scaled_dot_product_attention"): + attn_cls = SDPACrossAttention + else: + attn_cls = CrossAttention + + self.disable_self_attn = disable_self_attn + self.attn1 = attn_cls( + query_dim=dim, + heads=n_heads, + dim_head=d_head, + dropout=dropout, + context_dim=context_dim if self.disable_self_attn else None, + ) # is a self-attention if not self.disable_self_attn + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = attn_cls( + query_dim=dim, + context_dim=context_dim, + heads=n_heads, + dim_head=d_head, + dropout=dropout, + ) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint( + self._forward, (x, context), self.parameters(), self.checkpoint + ) + + def _forward(self, x, context=None): + x = ( + self.attn1( + self.norm1(x), context=context if self.disable_self_attn else None + ) + + x + ) + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. 
+ Finally, reshape to image + NEW: use_linear for more efficiency instead of the 1x1 convs + """ + + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + dropout=0.0, + context_dim=None, + disable_self_attn=False, + use_linear=False, + use_checkpoint=True, + ): + super().__init__() + if exists(context_dim) and not isinstance(context_dim, list): + context_dim = [context_dim] + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + if not use_linear: + self.proj_in = nn.Conv2d( + in_channels, inner_dim, kernel_size=1, stride=1, padding=0 + ) + else: + self.proj_in = nn.Linear(in_channels, inner_dim) + + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + n_heads, + d_head, + dropout=dropout, + context_dim=context_dim[d], + disable_self_attn=disable_self_attn, + checkpoint=use_checkpoint, + ) + for d in range(depth) + ] + ) + if not use_linear: + self.proj_out = zero_module( + nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) + else: + self.proj_out = zero_module(nn.Linear(in_channels, inner_dim)) + self.use_linear = use_linear + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = rearrange(x, "b c h w -> b (h w) c").contiguous() + if self.use_linear: + x = self.proj_in(x) + for i, block in enumerate(self.transformer_blocks): + x = block(x, context=context[i]) + if self.use_linear: + x = self.proj_out(x) + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous() + if not self.use_linear: + x = self.proj_out(x) + return x + x_in diff --git a/iopaint/model/anytext/ldm/modules/diffusionmodules/__init__.py b/iopaint/model/anytext/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ldm/modules/diffusionmodules/model.py b/iopaint/model/anytext/ldm/modules/diffusionmodules/model.py new file mode 100644 index 0000000..3472824 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,973 @@ +# pytorch_diffusion + derived encoder decoder +import math + +import numpy as np +import torch +import torch.nn as nn + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". 
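+ Concretely, for timestep t the first half_dim entries are sin(t * 10000^(-i / (half_dim - 1)))
+ for i in [0, half_dim), the second half are the matching cos terms, and a zero column is
+ appended when embedding_dim is odd.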
+ """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +def nonlinearity(x): + # swish + return x * torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm( + num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True + ) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=2, padding=0 + ) + + def forward(self, x): + if self.with_conv: + pad = (0, 1, 0, 1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + dropout, + temb_channels=512, + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d( + out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + else: + self.nin_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x + h + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + 
self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = q.reshape(b, c, h * w) + q = q.permute(0, 2, 1) # b,hw,c + k = k.reshape(b, c, h * w) # b,c,hw + w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c) ** (-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b, c, h * w) + w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v, w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b, c, h, w) + + h_ = self.proj_out(h_) + + return x + h_ + + +class AttnBlock2_0(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + # output: [1, 512, 64, 64] + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + + # q = q.reshape(b, c, h * w).transpose() + # q = q.permute(0, 2, 1) # b,hw,c + # k = k.reshape(b, c, h * w) # b,c,hw + q = q.transpose(1, 2) + k = k.transpose(1, 2) + v = v.transpose(1, 2) + # (batch, num_heads, seq_len, head_dim) + hidden_states = torch.nn.functional.scaled_dot_product_attention( + q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False + ) + hidden_states = hidden_states.transpose(1, 2) + hidden_states = hidden_states.to(q.dtype) + + h_ = self.proj_out(hidden_states) + + return x + h_ + + +def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): + assert attn_type in [ + "vanilla", + "vanilla-xformers", + "memory-efficient-cross-attn", + "linear", + "none", + ], f"attn_type {attn_type} unknown" + assert attn_kwargs is None + if hasattr(torch.nn.functional, "scaled_dot_product_attention"): + # print(f"Using torch.nn.functional.scaled_dot_product_attention") + return AttnBlock2_0(in_channels) + return AttnBlock(in_channels) + + +class Model(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + use_timestep=True, + use_linear_attn=False, + attn_type="vanilla", + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch * 4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList( + [ + torch.nn.Linear(self.ch, self.temb_ch), + torch.nn.Linear(self.temb_ch, self.temb_ch), + ] + ) + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = 
nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + skip_in = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + if i_block == self.num_res_blocks: + skip_in = ch * in_ch_mult[i_level] + block.append( + ResnetBlock( + in_channels=block_in + skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x, t=None, context=None): + # assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb + ) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + 
resolution, + z_channels, + double_z=True, + use_linear_attn=False, + attn_type="vanilla", + **ignore_kwargs, + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) + + curr_res = resolution + in_ch_mult = (1,) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions - 1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, + 2 * z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1, + ) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions - 1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + z_channels, + give_pre_end=False, + tanh_out=False, + use_linear_attn=False, + attn_type="vanilla", + **ignorekwargs, + ): + super().__init__() + if use_linear_attn: + attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,) + tuple(ch_mult) + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + print( + "Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape) + ) + ) + + # z to block_in + self.conv_in = torch.nn.Conv2d( + 
z_channels, block_in, kernel_size=3, stride=1, padding=1 + ) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, z): + # assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList( + [ + nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock( + in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, + dropout=0.0, + ), + ResnetBlock( + in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, + dropout=0.0, + ), + ResnetBlock( + in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, + dropout=0.0, + ), + nn.Conv2d(2 * in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True), + ] + ) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1, 2, 3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + ch, + num_res_blocks, + resolution, + ch_mult=(2, 2), + dropout=0.0, + ): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] 
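+ # Each resolution level stacks (num_res_blocks + 1) ResnetBlocks; every level except the
+ # last is followed by an Upsample, doubling curr_res on the way back to full resolution.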
+ block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d( + block_in, out_channels, kernel_size=3, stride=1, padding=1 + ) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = nn.Conv2d( + in_channels, mid_channels, kernel_size=3, stride=1, padding=1 + ) + self.res_block1 = nn.ModuleList( + [ + ResnetBlock( + in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0, + ) + for _ in range(depth) + ] + ) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList( + [ + ResnetBlock( + in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0, + ) + for _ in range(depth) + ] + ) + + self.conv_out = nn.Conv2d( + mid_channels, + out_channels, + kernel_size=1, + ) + + def forward(self, x): + x = self.conv_in(x) + for block in self.res_block1: + x = block(x, None) + x = torch.nn.functional.interpolate( + x, + size=( + int(round(x.shape[2] * self.factor)), + int(round(x.shape[3] * self.factor)), + ), + ) + x = self.attn(x) + for block in self.res_block2: + x = block(x, None) + x = self.conv_out(x) + return x + + +class MergedRescaleEncoder(nn.Module): + def __init__( + self, + in_channels, + ch, + resolution, + out_ch, + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + ch_mult=(1, 2, 4, 8), + rescale_factor=1.0, + rescale_module_depth=1, + ): + super().__init__() + intermediate_chn = ch * ch_mult[-1] + self.encoder = Encoder( + in_channels=in_channels, + num_res_blocks=num_res_blocks, + ch=ch, + ch_mult=ch_mult, + z_channels=intermediate_chn, + double_z=False, + resolution=resolution, + attn_resolutions=attn_resolutions, + dropout=dropout, + resamp_with_conv=resamp_with_conv, + out_ch=None, + ) + self.rescaler = LatentRescaler( + factor=rescale_factor, + in_channels=intermediate_chn, + mid_channels=intermediate_chn, + out_channels=out_ch, + depth=rescale_module_depth, + ) + + def forward(self, x): + x = self.encoder(x) + x = self.rescaler(x) + return x + + +class MergedRescaleDecoder(nn.Module): + def __init__( + self, + z_channels, + out_ch, + resolution, + num_res_blocks, + attn_resolutions, + ch, + ch_mult=(1, 2, 4, 8), + dropout=0.0, + resamp_with_conv=True, + rescale_factor=1.0, + rescale_module_depth=1, + ): + super().__init__() + tmp_chn = z_channels * ch_mult[-1] + self.decoder = Decoder( + out_ch=out_ch, + z_channels=tmp_chn, + attn_resolutions=attn_resolutions, + dropout=dropout, + resamp_with_conv=resamp_with_conv, + in_channels=None, + num_res_blocks=num_res_blocks, + ch_mult=ch_mult, + resolution=resolution, + ch=ch, + 
) + self.rescaler = LatentRescaler( + factor=rescale_factor, + in_channels=z_channels, + mid_channels=tmp_chn, + out_channels=tmp_chn, + depth=rescale_module_depth, + ) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Upsampler(nn.Module): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): + super().__init__() + assert out_size >= in_size + num_blocks = int(np.log2(out_size // in_size)) + 1 + factor_up = 1.0 + (out_size % in_size) + print( + f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}" + ) + self.rescaler = LatentRescaler( + factor=factor_up, + in_channels=in_channels, + mid_channels=2 * in_channels, + out_channels=in_channels, + ) + self.decoder = Decoder( + out_ch=out_channels, + resolution=out_size, + z_channels=in_channels, + num_res_blocks=2, + attn_resolutions=[], + in_channels=None, + ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)], + ) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Resize(nn.Module): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): + super().__init__() + self.with_conv = learned + self.mode = mode + if self.with_conv: + print( + f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode" + ) + raise NotImplementedError() + assert in_channels is not None + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=4, stride=2, padding=1 + ) + + def forward(self, x, scale_factor=1.0): + if scale_factor == 1.0: + return x + else: + x = torch.nn.functional.interpolate( + x, mode=self.mode, align_corners=False, scale_factor=scale_factor + ) + return x diff --git a/iopaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py b/iopaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 0000000..fd3d6be --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,786 @@ +from abc import abstractmethod +import math + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer +from iopaint.model.anytext.ldm.util import exists + + +# dummy replace +def convert_module_to_f16(x): + pass + +def convert_module_to_f32(x): + pass + + +## go +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + + def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) + x = self.qkv_proj(x) + x = self.attention(x) + x = 
self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. 
+ :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None, + disable_middle_self_attn=False, + use_linear_in_transformer=False, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError("provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult") + self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) + print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set.") + self.use_fp16 = use_fp16 + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + if isinstance(self.num_classes, int): + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + elif self.num_classes == "continuous": + print("setting up linear c_adm embedding layer") + self.label_emb = nn.Linear(1, time_embed_dim) + else: + raise ValueError() + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + 
if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(self.num_res_blocks[level] + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or i < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + if level and i == self.num_res_blocks[level]: + out_ch = ch + layers.append( + ResBlock( 
+ ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. + """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) diff --git a/iopaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py b/iopaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py new file mode 100644 index 0000000..5f92630 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/diffusionmodules/upscaling.py @@ -0,0 +1,81 @@ +import torch +import torch.nn as nn +import numpy as np +from functools import partial + +from iopaint.model.anytext.ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule +from iopaint.model.anytext.ldm.util import default + + +class AbstractLowScaleModel(nn.Module): + # for concatenating a downsampled image to the latent representation + def __init__(self, noise_schedule_config=None): + super(AbstractLowScaleModel, self).__init__() + if noise_schedule_config is not None: + self.register_schedule(**noise_schedule_config) + + def register_schedule(self, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def forward(self, x): + return x, None + + def decode(self, x): + return x + + +class SimpleImageConcat(AbstractLowScaleModel): + # no noise level conditioning + def __init__(self): + super(SimpleImageConcat, self).__init__(noise_schedule_config=None) + self.max_noise_level = 0 + + def forward(self, x): + # fix to constant noise level + return x, torch.zeros(x.shape[0], device=x.device).long() + + +class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel): + def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False): + super().__init__(noise_schedule_config=noise_schedule_config) + self.max_noise_level = max_noise_level + + def forward(self, x, noise_level=None): + if noise_level is None: + noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() + else: + assert isinstance(noise_level, torch.Tensor) + z = self.q_sample(x, noise_level) + return z, noise_level + + + diff --git a/iopaint/model/anytext/ldm/modules/diffusionmodules/util.py b/iopaint/model/anytext/ldm/modules/diffusionmodules/util.py new file mode 100644 index 0000000..da29c72 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,271 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
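`util.py` below collects the schedule and embedding helpers (`make_beta_schedule`, `extract_into_tensor`, `timestep_embedding`, `checkpoint`, ...) that the diffusion modules above rely on. For orientation, here is a minimal stand-alone sketch of the closed-form forward-diffusion step that `AbstractLowScaleModel.q_sample` computes with these helpers, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, using the same default linear schedule (all names in the sketch are illustrative, not the module's API):

```python
import torch

# Stand-alone sketch of q(x_t | x_0); the schedule constants mirror the
# defaults above (beta_schedule="linear", linear_start=1e-4, linear_end=2e-2).
T = 1000
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, T, dtype=torch.float64) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(4, 3, 64, 64)          # stand-in for clean inputs
t = torch.randint(0, T, (4,))           # one timestep per batch element
noise = torch.randn_like(x0)

a_bar = alphas_cumprod[t].float().view(-1, 1, 1, 1)
xt = a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise  # same formula as q_sample
```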
+ + +import os +import math +import torch +import torch.nn as nn +import numpy as np +from einops import repeat + +from iopaint.model.anytext.ldm.util import instantiate_from_config + + +def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if schedule == "linear": + betas = ( + torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 + ) + + elif schedule == "cosine": + timesteps = ( + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s + ) + alphas = timesteps / (1 + cosine_s) * np.pi / 2 + alphas = torch.cos(alphas).pow(2) + alphas = alphas / alphas[0] + betas = 1 - alphas[1:] / alphas[:-1] + betas = np.clip(betas, a_min=0, a_max=0.999) + + elif schedule == "sqrt_linear": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) + elif schedule == "sqrt": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 + else: + raise ValueError(f"schedule '{schedule}' unknown.") + return betas.numpy() + + +def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): + if ddim_discr_method == 'uniform': + c = num_ddpm_timesteps // num_ddim_timesteps + ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + elif ddim_discr_method == 'quad': + ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) + else: + raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') + + # assert ddim_timesteps.shape[0] == num_ddim_timesteps + # add one to get the final alpha values right (the ones from first scale to data during sampling) + steps_out = ddim_timesteps + 1 + if verbose: + print(f'Selected timesteps for ddim sampler: {steps_out}') + return steps_out + + +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): + # select alphas for computing the variance schedule + alphas = alphacums[ddim_timesteps] + alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) + + # according the the formula provided in https://arxiv.org/abs/2010.02502 + sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) + if verbose: + print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') + print(f'For the chosen value of eta, which is {eta}, ' + f'this results in the following sigma_t schedule for ddim sampler {sigmas}') + return sigmas.to(torch.float32), alphas.to(torch.float32), alphas_prev.astype(np.float32) + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(), + "dtype": torch.get_autocast_gpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled()} + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(), \ + torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. 
+ """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. + """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + # return super().forward(x.float()).type(x.dtype) + return super().forward(x).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/iopaint/model/anytext/ldm/modules/distributions/__init__.py b/iopaint/model/anytext/ldm/modules/distributions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ldm/modules/distributions/distributions.py b/iopaint/model/anytext/ldm/modules/distributions/distributions.py new file mode 100644 index 0000000..f2b8ef9 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if 
self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/iopaint/model/anytext/ldm/modules/ema.py b/iopaint/model/anytext/ldm/modules/ema.py new file mode 100644 index 0000000..bded250 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/ema.py @@ -0,0 +1,80 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates + else torch.tensor(-1, dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + # remove as '.'-character is not allowed in buffers + s_name = name.replace('.', '') + self.m_name2s_name.update({name: s_name}) + self.register_buffer(s_name, p.clone().detach().data) + + self.collected_params = [] + + def reset_num_updates(self): + del self.num_updates + self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int)) + + def forward(self, model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name 
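`LitEma` keeps a float shadow copy of every trainable parameter and, on each call, nudges it toward the live weights via `shadow -= (1 - decay) * (shadow - param)`. A minimal usage sketch of the update / store / copy_to / restore cycle; the tiny `nn.Linear` model, optimizer, and training loop are placeholders, not part of this patch:

```python
import torch
from torch import nn

model = nn.Linear(4, 2)                            # stand-in for the real network
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
ema = LitEma(model, decay=0.9999)

for _ in range(10):                                # stand-in training loop
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()
    ema(model)                                     # update the shadow parameters

ema.store(model.parameters())                      # stash the live weights
ema.copy_to(model)                                 # swap in the EMA weights
# ... run validation with EMA weights here ...
ema.restore(model.parameters())                    # put the live weights back
```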
+ + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. + """ + for c_param, param in zip(self.collected_params, parameters): + param.data.copy_(c_param.data) diff --git a/iopaint/model/anytext/ldm/modules/encoders/__init__.py b/iopaint/model/anytext/ldm/modules/encoders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ldm/modules/encoders/modules.py b/iopaint/model/anytext/ldm/modules/encoders/modules.py new file mode 100644 index 0000000..ceac395 --- /dev/null +++ b/iopaint/model/anytext/ldm/modules/encoders/modules.py @@ -0,0 +1,411 @@ +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from transformers import ( + T5Tokenizer, + T5EncoderModel, + CLIPTokenizer, + CLIPTextModel, + AutoProcessor, + CLIPVisionModelWithProjection, +) + +from iopaint.model.anytext.ldm.util import count_params + + +def _expand_mask(mask, dtype, tgt_len=None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill( + inverted_mask.to(torch.bool), torch.finfo(dtype).min + ) + + +def _build_causal_attention_mask(bsz, seq_len, dtype): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) # zero out the lower diagonal + mask = mask.unsqueeze(1) # expand mask + return mask + + +class AbstractEncoder(nn.Module): + def __init__(self): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + + +class IdentityEncoder(AbstractEncoder): + def encode(self, x): + return x + + +class ClassEmbedder(nn.Module): + def __init__(self, embed_dim, n_classes=1000, key="class", ucg_rate=0.1): + super().__init__() + self.key = key + self.embedding = nn.Embedding(n_classes, embed_dim) + self.n_classes = n_classes + self.ucg_rate = ucg_rate + + def forward(self, batch, key=None, disable_dropout=False): + if key is None: + key = self.key + # this is for use in crossattn + c = batch[key][:, None] + if self.ucg_rate > 0.0 and not disable_dropout: + mask = 1.0 - torch.bernoulli(torch.ones_like(c) * self.ucg_rate) + c = mask * c + (1 - mask) * torch.ones_like(c) * (self.n_classes - 1) + c = c.long() + c = self.embedding(c) + return c + + def get_unconditional_conditioning(self, bs, device="cuda"): + uc_class = ( + self.n_classes - 1 + ) # 1000 classes --> 0 ... 
999, one extra class for ucg (class 1000) + uc = torch.ones((bs,), device=device) * uc_class + uc = {self.key: uc} + return uc + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class FrozenT5Embedder(AbstractEncoder): + """Uses the T5 transformer encoder for text""" + + def __init__( + self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True + ): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl + super().__init__() + self.tokenizer = T5Tokenizer.from_pretrained(version) + self.transformer = T5EncoderModel.from_pretrained(version) + self.device = device + self.max_length = max_length # TODO: typical value? + if freeze: + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + # self.train = disabled_train + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer(input_ids=tokens) + + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + + +class FrozenCLIPEmbedder(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from huggingface)""" + + LAYERS = ["last", "pooled", "hidden"] + + def __init__( + self, + version="openai/clip-vit-large-patch14", + device="cuda", + max_length=77, + freeze=True, + layer="last", + layer_idx=None, + ): # clip-vit-base-patch32 + super().__init__() + assert layer in self.LAYERS + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + self.layer = layer + self.layer_idx = layer_idx + if layer == "hidden": + assert layer_idx is not None + assert 0 <= abs(layer_idx) <= 12 + + def freeze(self): + self.transformer = self.transformer.eval() + # self.train = disabled_train + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer( + input_ids=tokens, output_hidden_states=self.layer == "hidden" + ) + if self.layer == "last": + z = outputs.last_hidden_state + elif self.layer == "pooled": + z = outputs.pooler_output[:, None, :] + else: + z = outputs.hidden_states[self.layer_idx] + return z + + def encode(self, text): + return self(text) + + +class FrozenCLIPT5Encoder(AbstractEncoder): + def __init__( + self, + clip_version="openai/clip-vit-large-patch14", + t5_version="google/t5-v1_1-xl", + device="cuda", + clip_max_length=77, + t5_max_length=77, + ): + super().__init__() + self.clip_encoder = FrozenCLIPEmbedder( + clip_version, device, max_length=clip_max_length + ) + self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length) + print( + f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, " + f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params." 
+ ) + + def encode(self, text): + return self(text) + + def forward(self, text): + clip_z = self.clip_encoder.encode(text) + t5_z = self.t5_encoder.encode(text) + return [clip_z, t5_z] + + +class FrozenCLIPEmbedderT3(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from Hugging Face)""" + + def __init__( + self, + version="openai/clip-vit-large-patch14", + device="cuda", + max_length=77, + freeze=True, + use_vision=False, + ): + super().__init__() + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + if use_vision: + self.vit = CLIPVisionModelWithProjection.from_pretrained(version) + self.processor = AutoProcessor.from_pretrained(version) + self.device = device + self.max_length = max_length + if freeze: + self.freeze() + + def embedding_forward( + self, + input_ids=None, + position_ids=None, + inputs_embeds=None, + embedding_manager=None, + ): + seq_length = ( + input_ids.shape[-1] + if input_ids is not None + else inputs_embeds.shape[-2] + ) + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + if embedding_manager is not None: + inputs_embeds = embedding_manager(input_ids, inputs_embeds) + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + return embeddings + + self.transformer.text_model.embeddings.forward = embedding_forward.__get__( + self.transformer.text_model.embeddings + ) + + def encoder_forward( + self, + inputs_embeds, + attention_mask=None, + causal_attention_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + return hidden_states + + self.transformer.text_model.encoder.forward = encoder_forward.__get__( + self.transformer.text_model.encoder + ) + + def text_encoder_forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + embedding_manager=None, + ): + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + if input_ids is None: + raise ValueError("You have to specify either input_ids") + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + 
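# embeddings.forward was patched above (embedding_forward), so an embedding_manager can inject custom token embeddings here before the encoder runs +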
hidden_states = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + embedding_manager=embedding_manager, + ) + bsz, seq_len = input_shape + # CLIP's text model uses causal mask, prepare it here. + # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = _build_causal_attention_mask( + bsz, seq_len, hidden_states.dtype + ).to(hidden_states.device) + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + last_hidden_state = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = self.final_layer_norm(last_hidden_state) + return last_hidden_state + + self.transformer.text_model.forward = text_encoder_forward.__get__( + self.transformer.text_model + ) + + def transformer_forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + embedding_manager=None, + ): + return self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + embedding_manager=embedding_manager, + ) + + self.transformer.forward = transformer_forward.__get__(self.transformer) + + def freeze(self): + self.transformer = self.transformer.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text, **kwargs): + batch_encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_length, + return_length=True, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt", + ) + tokens = batch_encoding["input_ids"].to(self.device) + z = self.transformer(input_ids=tokens, **kwargs) + return z + + def encode(self, text, **kwargs): + return self(text, **kwargs) diff --git a/iopaint/model/anytext/ldm/util.py b/iopaint/model/anytext/ldm/util.py new file mode 100644 index 0000000..d456a86 --- /dev/null +++ b/iopaint/model/anytext/ldm/util.py @@ -0,0 +1,197 @@ +import importlib + +import torch +from torch import optim +import numpy as np + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('font/Arial_Unicode.ttf', size=size) + nc = int(32 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. 
Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x,torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config, **kwargs): + if "target" not in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +class AdamWwithEMAandWings(optim.Optimizer): + # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 + def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: check hyperparameters before using + weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code + ema_power=1., param_names=()): + """AdamW that saves EMA versions of the parameters.""" + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= ema_decay <= 1.0: + raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, + ema_power=ema_power, param_names=param_names) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + ema_params_with_grad = [] + state_sums = [] + max_exp_avg_sqs = [] + state_steps = [] + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + ema_decay = group['ema_decay'] + ema_power = group['ema_power'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('AdamW does not support sparse gradients') + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of parameter values + state['param_exp_avg'] = p.detach().float().clone() + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + ema_params_with_grad.append(state['param_exp_avg']) + + if amsgrad: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + + # update the steps for each param group update + state['step'] += 1 + # record the step after step update + state_steps.append(state['step']) + + optim._functional.adamw(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + maximize=False) + + cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power) + for param, ema_param in zip(params_with_grad, ema_params_with_grad): + ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay) + + return loss \ No newline at end of file diff --git a/iopaint/model/anytext/main.py b/iopaint/model/anytext/main.py new file mode 100644 index 0000000..f7b2d2e --- /dev/null +++ b/iopaint/model/anytext/main.py @@ -0,0 +1,45 @@ +import cv2 +import os + +from anytext_pipeline import AnyTextPipeline +from utils import save_images + +seed = 66273235 +# seed_everything(seed) + +pipe = AnyTextPipeline( + ckpt_path="/Users/cwq/code/github/IOPaint/iopaint/model/anytext/anytext_v1.1_fp16.ckpt", + font_path="/Users/cwq/code/github/AnyText/anytext/font/SourceHanSansSC-Medium.otf", + use_fp16=False, + device="mps", +) + +img_save_folder = "SaveImages" +rgb_image = cv2.imread( + "/Users/cwq/code/github/AnyText/anytext/example_images/ref7.jpg" +)[..., ::-1] + +masked_image = cv2.imread( + "/Users/cwq/code/github/AnyText/anytext/example_images/edit7.png" +)[..., ::-1] + +rgb_image = cv2.resize(rgb_image, (512, 512)) +masked_image = cv2.resize(masked_image, (512, 512)) + +# results: list of rgb ndarray +results, rtn_code, rtn_warning = pipe( + prompt='A cake with colorful characters that reads "EVERYDAY", best quality, extremely detailed,4k, HD, supper legible text, clear text edges, clear strokes, neat writing, no watermarks', + negative_prompt="low-res, bad anatomy, extra digit, fewer digits, cropped, worst quality, low quality, watermark, unreadable text, messy words, distorted text, disorganized writing, advertising picture", + image=rgb_image, + masked_image=masked_image, 
+ num_inference_steps=20, + strength=1.0, + guidance_scale=9.0, + height=rgb_image.shape[0], + width=rgb_image.shape[1], + seed=seed, + sort_priority="y", +) +if rtn_code >= 0: + save_images(results, img_save_folder) + print(f"Done, result images are saved in: {img_save_folder}") diff --git a/iopaint/model/anytext/ocr_recog/RNN.py b/iopaint/model/anytext/ocr_recog/RNN.py new file mode 100755 index 0000000..cf16855 --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/RNN.py @@ -0,0 +1,210 @@ +from torch import nn +import torch +from .RecSVTR import Block + +class Swish(nn.Module): + def __int__(self): + super(Swish, self).__int__() + + def forward(self,x): + return x*torch.sigmoid(x) + +class Im2Im(nn.Module): + def __init__(self, in_channels, **kwargs): + super().__init__() + self.out_channels = in_channels + + def forward(self, x): + return x + +class Im2Seq(nn.Module): + def __init__(self, in_channels, **kwargs): + super().__init__() + self.out_channels = in_channels + + def forward(self, x): + B, C, H, W = x.shape + # assert H == 1 + x = x.reshape(B, C, H * W) + x = x.permute((0, 2, 1)) + return x + +class EncoderWithRNN(nn.Module): + def __init__(self, in_channels,**kwargs): + super(EncoderWithRNN, self).__init__() + hidden_size = kwargs.get('hidden_size', 256) + self.out_channels = hidden_size * 2 + self.lstm = nn.LSTM(in_channels, hidden_size, bidirectional=True, num_layers=2,batch_first=True) + + def forward(self, x): + self.lstm.flatten_parameters() + x, _ = self.lstm(x) + return x + +class SequenceEncoder(nn.Module): + def __init__(self, in_channels, encoder_type='rnn', **kwargs): + super(SequenceEncoder, self).__init__() + self.encoder_reshape = Im2Seq(in_channels) + self.out_channels = self.encoder_reshape.out_channels + self.encoder_type = encoder_type + if encoder_type == 'reshape': + self.only_reshape = True + else: + support_encoder_dict = { + 'reshape': Im2Seq, + 'rnn': EncoderWithRNN, + 'svtr': EncoderWithSVTR + } + assert encoder_type in support_encoder_dict, '{} must in {}'.format( + encoder_type, support_encoder_dict.keys()) + + self.encoder = support_encoder_dict[encoder_type]( + self.encoder_reshape.out_channels,**kwargs) + self.out_channels = self.encoder.out_channels + self.only_reshape = False + + def forward(self, x): + if self.encoder_type != 'svtr': + x = self.encoder_reshape(x) + if not self.only_reshape: + x = self.encoder(x) + return x + else: + x = self.encoder(x) + x = self.encoder_reshape(x) + return x + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=0, + bias_attr=False, + groups=1, + act=nn.GELU): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()), + bias=bias_attr) + self.norm = nn.BatchNorm2d(out_channels) + self.act = Swish() + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + out = self.act(out) + return out + + +class EncoderWithSVTR(nn.Module): + def __init__( + self, + in_channels, + dims=64, # XS + depth=2, + hidden_dims=120, + use_guide=False, + num_heads=8, + qkv_bias=True, + mlp_ratio=2.0, + drop_rate=0.1, + attn_drop_rate=0.1, + drop_path=0., + qk_scale=None): + super(EncoderWithSVTR, self).__init__() + self.depth = depth + self.use_guide = use_guide + self.conv1 = ConvBNLayer( + in_channels, in_channels // 8, padding=1, act='swish') + 
self.conv2 = ConvBNLayer( + in_channels // 8, hidden_dims, kernel_size=1, act='swish') + + self.svtr_block = nn.ModuleList([ + Block( + dim=hidden_dims, + num_heads=num_heads, + mixer='Global', + HW=None, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer='swish', + attn_drop=attn_drop_rate, + drop_path=drop_path, + norm_layer='nn.LayerNorm', + epsilon=1e-05, + prenorm=False) for i in range(depth) + ]) + self.norm = nn.LayerNorm(hidden_dims, eps=1e-6) + self.conv3 = ConvBNLayer( + hidden_dims, in_channels, kernel_size=1, act='swish') + # last conv-nxn, the input is concat of input tensor and conv3 output tensor + self.conv4 = ConvBNLayer( + 2 * in_channels, in_channels // 8, padding=1, act='swish') + + self.conv1x1 = ConvBNLayer( + in_channels // 8, dims, kernel_size=1, act='swish') + self.out_channels = dims + self.apply(self._init_weights) + + def _init_weights(self, m): + # weight initialization + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + def forward(self, x): + # for use guide + if self.use_guide: + z = x.clone() + z.stop_gradient = True + else: + z = x + # for short cut + h = z + # reduce dim + z = self.conv1(z) + z = self.conv2(z) + # SVTR global block + B, C, H, W = z.shape + z = z.flatten(2).permute(0, 2, 1) + + for blk in self.svtr_block: + z = blk(z) + + z = self.norm(z) + # last stage + z = z.reshape([-1, H, W, C]).permute(0, 3, 1, 2) + z = self.conv3(z) + z = torch.cat((h, z), dim=1) + z = self.conv1x1(self.conv4(z)) + + return z + +if __name__=="__main__": + svtrRNN = EncoderWithSVTR(56) + print(svtrRNN) \ No newline at end of file diff --git a/iopaint/model/anytext/ocr_recog/RecCTCHead.py b/iopaint/model/anytext/ocr_recog/RecCTCHead.py new file mode 100755 index 0000000..867ede9 --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/RecCTCHead.py @@ -0,0 +1,48 @@ +from torch import nn + + +class CTCHead(nn.Module): + def __init__(self, + in_channels, + out_channels=6625, + fc_decay=0.0004, + mid_channels=None, + return_feats=False, + **kwargs): + super(CTCHead, self).__init__() + if mid_channels is None: + self.fc = nn.Linear( + in_channels, + out_channels, + bias=True,) + else: + self.fc1 = nn.Linear( + in_channels, + mid_channels, + bias=True, + ) + self.fc2 = nn.Linear( + mid_channels, + out_channels, + bias=True, + ) + + self.out_channels = out_channels + self.mid_channels = mid_channels + self.return_feats = return_feats + + def forward(self, x, labels=None): + if self.mid_channels is None: + predicts = self.fc(x) + else: + x = self.fc1(x) + predicts = self.fc2(x) + + if self.return_feats: + result = dict() + result['ctc'] = predicts + result['ctc_neck'] = x + else: + result = predicts + + return result diff --git a/iopaint/model/anytext/ocr_recog/RecModel.py b/iopaint/model/anytext/ocr_recog/RecModel.py new file mode 100755 index 0000000..c2313bf --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/RecModel.py @@ -0,0 +1,45 @@ +from torch import nn +from .RNN import SequenceEncoder, Im2Seq, Im2Im +from 
.RecMv1_enhance import MobileNetV1Enhance + +from .RecCTCHead import CTCHead + +backbone_dict = {"MobileNetV1Enhance":MobileNetV1Enhance} +neck_dict = {'SequenceEncoder': SequenceEncoder, 'Im2Seq': Im2Seq,'None':Im2Im} +head_dict = {'CTCHead':CTCHead} + + +class RecModel(nn.Module): + def __init__(self, config): + super().__init__() + assert 'in_channels' in config, 'in_channels must in model config' + backbone_type = config.backbone.pop('type') + assert backbone_type in backbone_dict, f'backbone.type must in {backbone_dict}' + self.backbone = backbone_dict[backbone_type](config.in_channels, **config.backbone) + + neck_type = config.neck.pop('type') + assert neck_type in neck_dict, f'neck.type must in {neck_dict}' + self.neck = neck_dict[neck_type](self.backbone.out_channels, **config.neck) + + head_type = config.head.pop('type') + assert head_type in head_dict, f'head.type must in {head_dict}' + self.head = head_dict[head_type](self.neck.out_channels, **config.head) + + self.name = f'RecModel_{backbone_type}_{neck_type}_{head_type}' + + def load_3rd_state_dict(self, _3rd_name, _state): + self.backbone.load_3rd_state_dict(_3rd_name, _state) + self.neck.load_3rd_state_dict(_3rd_name, _state) + self.head.load_3rd_state_dict(_3rd_name, _state) + + def forward(self, x): + x = self.backbone(x) + x = self.neck(x) + x = self.head(x) + return x + + def encode(self, x): + x = self.backbone(x) + x = self.neck(x) + x = self.head.ctc_encoder(x) + return x diff --git a/iopaint/model/anytext/ocr_recog/RecMv1_enhance.py b/iopaint/model/anytext/ocr_recog/RecMv1_enhance.py new file mode 100644 index 0000000..7529b4a --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/RecMv1_enhance.py @@ -0,0 +1,232 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from .common import Activation + + +class ConvBNLayer(nn.Module): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + act='hard_swish'): + super(ConvBNLayer, self).__init__() + self.act = act + self._conv = nn.Conv2d( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + bias=False) + + self._batch_norm = nn.BatchNorm2d( + num_filters, + ) + if self.act is not None: + self._act = Activation(act_type=act, inplace=True) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class DepthwiseSeparable(nn.Module): + def __init__(self, + num_channels, + num_filters1, + num_filters2, + num_groups, + stride, + scale, + dw_size=3, + padding=1, + use_se=False): + super(DepthwiseSeparable, self).__init__() + self.use_se = use_se + self._depthwise_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=int(num_filters1 * scale), + filter_size=dw_size, + stride=stride, + padding=padding, + num_groups=int(num_groups * scale)) + if use_se: + self._se = SEModule(int(num_filters1 * scale)) + self._pointwise_conv = ConvBNLayer( + num_channels=int(num_filters1 * scale), + filter_size=1, + num_filters=int(num_filters2 * scale), + stride=1, + padding=0) + + def forward(self, inputs): + y = self._depthwise_conv(inputs) + if self.use_se: + y = self._se(y) + y = self._pointwise_conv(y) + return y + + +class MobileNetV1Enhance(nn.Module): + def __init__(self, + in_channels=3, + scale=0.5, + last_conv_stride=1, + last_pool_type='max', + **kwargs): + super().__init__() + self.scale = scale + self.block_list = [] + + 
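# MobileNetV1-style stem plus depthwise-separable stack; the (2, 1) strides below halve feature-map height while keeping width, preserving horizontal resolution for text recognition +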
self.conv1 = ConvBNLayer( + num_channels=in_channels, + filter_size=3, + channels=3, + num_filters=int(32 * scale), + stride=2, + padding=1) + + conv2_1 = DepthwiseSeparable( + num_channels=int(32 * scale), + num_filters1=32, + num_filters2=64, + num_groups=32, + stride=1, + scale=scale) + self.block_list.append(conv2_1) + + conv2_2 = DepthwiseSeparable( + num_channels=int(64 * scale), + num_filters1=64, + num_filters2=128, + num_groups=64, + stride=1, + scale=scale) + self.block_list.append(conv2_2) + + conv3_1 = DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=128, + num_groups=128, + stride=1, + scale=scale) + self.block_list.append(conv3_1) + + conv3_2 = DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=256, + num_groups=128, + stride=(2, 1), + scale=scale) + self.block_list.append(conv3_2) + + conv4_1 = DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=256, + num_groups=256, + stride=1, + scale=scale) + self.block_list.append(conv4_1) + + conv4_2 = DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=512, + num_groups=256, + stride=(2, 1), + scale=scale) + self.block_list.append(conv4_2) + + for _ in range(5): + conv5 = DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=512, + num_groups=512, + stride=1, + dw_size=5, + padding=2, + scale=scale, + use_se=False) + self.block_list.append(conv5) + + conv5_6 = DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=1024, + num_groups=512, + stride=(2, 1), + dw_size=5, + padding=2, + scale=scale, + use_se=True) + self.block_list.append(conv5_6) + + conv6 = DepthwiseSeparable( + num_channels=int(1024 * scale), + num_filters1=1024, + num_filters2=1024, + num_groups=1024, + stride=last_conv_stride, + dw_size=5, + padding=2, + use_se=True, + scale=scale) + self.block_list.append(conv6) + + self.block_list = nn.Sequential(*self.block_list) + if last_pool_type == 'avg': + self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) + else: + self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + self.out_channels = int(1024 * scale) + + def forward(self, inputs): + y = self.conv1(inputs) + y = self.block_list(y) + y = self.pool(y) + return y + +def hardsigmoid(x): + return F.relu6(x + 3., inplace=True) / 6. 
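The hand-rolled `hardsigmoid` above, `relu6(x + 3) / 6`, traces the same curve as `torch.nn.functional.hardsigmoid` in current PyTorch releases, and the `SEModule` below uses it as the channel gate of a standard squeeze-and-excitation block (`x * gate(conv2(relu(conv1(avg_pool(x)))))`). A quick stand-alone check of that equivalence (illustrative, not part of the module):

```python
import torch
import torch.nn.functional as F

# Both sides are 0 for x <= -3, 1 for x >= 3, and x / 6 + 0.5 in between.
x = torch.linspace(-6.0, 6.0, steps=25)
assert torch.allclose(F.relu6(x + 3.0) / 6.0, F.hardsigmoid(x))
```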
+ +class SEModule(nn.Module): + def __init__(self, channel, reduction=4): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0, + bias=True) + self.conv2 = nn.Conv2d( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0, + bias=True) + + def forward(self, inputs): + outputs = self.avg_pool(inputs) + outputs = self.conv1(outputs) + outputs = F.relu(outputs) + outputs = self.conv2(outputs) + outputs = hardsigmoid(outputs) + x = torch.mul(inputs, outputs) + + return x diff --git a/iopaint/model/anytext/ocr_recog/RecSVTR.py b/iopaint/model/anytext/ocr_recog/RecSVTR.py new file mode 100644 index 0000000..484b3df --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/RecSVTR.py @@ -0,0 +1,591 @@ +import torch +import torch.nn as nn +import numpy as np +from torch.nn.init import trunc_normal_, zeros_, ones_ +from torch.nn import functional + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + """ + if drop_prob == 0. or not training: + return x + keep_prob = torch.tensor(1 - drop_prob) + shape = (x.size()[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype) + random_tensor = torch.floor(random_tensor) # binarize + output = x.divide(keep_prob) * random_tensor + return output + + +class Swish(nn.Module): + def __int__(self): + super(Swish, self).__int__() + + def forward(self,x): + return x*torch.sigmoid(x) + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=0, + bias_attr=False, + groups=1, + act=nn.GELU): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()), + bias=bias_attr) + self.norm = nn.BatchNorm2d(out_channels) + self.act = act() + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + out = self.act(out) + return out + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Identity(nn.Module): + def __init__(self): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +class Mlp(nn.Module): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + if isinstance(act_layer, str): + self.act = Swish() + else: + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class ConvMixer(nn.Module): + def __init__( + self, + dim, + num_heads=8, + HW=(8, 25), + local_k=(3, 3), ): + super().__init__() + self.HW = HW + self.dim = dim + self.local_mixer = nn.Conv2d( + dim, + dim, + local_k, + 1, (local_k[0] // 2, local_k[1] // 2), + groups=num_heads, + # weight_attr=ParamAttr(initializer=KaimingNormal()) + ) + + def forward(self, x): + h = self.HW[0] + w = self.HW[1] + x = x.transpose([0, 2, 1]).reshape([0, self.dim, h, w]) + x = self.local_mixer(x) + x = x.flatten(2).transpose([0, 2, 1]) + return x + + +class Attention(nn.Module): + def __init__(self, + dim, + num_heads=8, + mixer='Global', + HW=(8, 25), + local_k=(7, 11), + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.HW = HW + if HW is not None: + H = HW[0] + W = HW[1] + self.N = H * W + self.C = dim + if mixer == 'Local' and HW is not None: + hk = local_k[0] + wk = local_k[1] + mask = torch.ones([H * W, H + hk - 1, W + wk - 1]) + for h in range(0, H): + for w in range(0, W): + mask[h * W + w, h:h + hk, w:w + wk] = 0. 
+ mask_paddle = mask[:, hk // 2:H + hk // 2, wk // 2:W + wk // + 2].flatten(1) + mask_inf = torch.full([H * W, H * W],fill_value=float('-inf')) + mask = torch.where(mask_paddle < 1, mask_paddle, mask_inf) + self.mask = mask[None,None,:] + # self.mask = mask.unsqueeze([0, 1]) + self.mixer = mixer + + def forward(self, x): + if self.HW is not None: + N = self.N + C = self.C + else: + _, N, C = x.shape + qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C //self.num_heads)).permute((2, 0, 3, 1, 4)) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q.matmul(k.permute((0, 1, 3, 2)))) + if self.mixer == 'Local': + attn += self.mask + attn = functional.softmax(attn, dim=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).permute((0, 2, 1, 3)).reshape((-1, N, C)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, + dim, + num_heads, + mixer='Global', + local_mixer=(7, 11), + HW=(8, 25), + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer='nn.LayerNorm', + epsilon=1e-6, + prenorm=True): + super().__init__() + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, eps=epsilon) + else: + self.norm1 = norm_layer(dim) + if mixer == 'Global' or mixer == 'Local': + + self.mixer = Attention( + dim, + num_heads=num_heads, + mixer=mixer, + HW=HW, + local_k=local_mixer, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + elif mixer == 'Conv': + self.mixer = ConvMixer( + dim, num_heads=num_heads, HW=HW, local_k=local_mixer) + else: + raise TypeError("The mixer must be one of [Global, Local, Conv]") + + self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity() + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, eps=epsilon) + else: + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp_ratio = mlp_ratio + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + self.prenorm = prenorm + + def forward(self, x): + if self.prenorm: + x = self.norm1(x + self.drop_path(self.mixer(x))) + x = self.norm2(x + self.drop_path(self.mlp(x))) + else: + x = x + self.drop_path(self.mixer(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=(32, 100), + in_channels=3, + embed_dim=768, + sub_num=2): + super().__init__() + num_patches = (img_size[1] // (2 ** sub_num)) * \ + (img_size[0] // (2 ** sub_num)) + self.img_size = img_size + self.num_patches = num_patches + self.embed_dim = embed_dim + self.norm = None + if sub_num == 2: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False)) + if sub_num == 3: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 4, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False), + ConvBNLayer( + in_channels=embed_dim // 4, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act=nn.GELU, + bias_attr=False), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + 
act=nn.GELU, + bias_attr=False)) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).permute(0, 2, 1) + return x + + +class SubSample(nn.Module): + def __init__(self, + in_channels, + out_channels, + types='Pool', + stride=(2, 1), + sub_norm='nn.LayerNorm', + act=None): + super().__init__() + self.types = types + if types == 'Pool': + self.avgpool = nn.AvgPool2d( + kernel_size=(3, 5), stride=stride, padding=(1, 2)) + self.maxpool = nn.MaxPool2d( + kernel_size=(3, 5), stride=stride, padding=(1, 2)) + self.proj = nn.Linear(in_channels, out_channels) + else: + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + # weight_attr=ParamAttr(initializer=KaimingNormal()) + ) + self.norm = eval(sub_norm)(out_channels) + if act is not None: + self.act = act() + else: + self.act = None + + def forward(self, x): + + if self.types == 'Pool': + x1 = self.avgpool(x) + x2 = self.maxpool(x) + x = (x1 + x2) * 0.5 + out = self.proj(x.flatten(2).permute((0, 2, 1))) + else: + x = self.conv(x) + out = x.flatten(2).permute((0, 2, 1)) + out = self.norm(out) + if self.act is not None: + out = self.act(out) + + return out + + +class SVTRNet(nn.Module): + def __init__( + self, + img_size=[48, 100], + in_channels=3, + embed_dim=[64, 128, 256], + depth=[3, 6, 3], + num_heads=[2, 4, 8], + mixer=['Local'] * 6 + ['Global'] * + 6, # Local atten, Global atten, Conv + local_mixer=[[7, 11], [7, 11], [7, 11]], + patch_merging='Conv', # Conv, Pool, None + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + last_drop=0.1, + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer='nn.LayerNorm', + sub_norm='nn.LayerNorm', + epsilon=1e-6, + out_channels=192, + out_char_num=25, + block_unit='Block', + act='nn.GELU', + last_stage=True, + sub_num=2, + prenorm=True, + use_lenhead=False, + **kwargs): + super().__init__() + self.img_size = img_size + self.embed_dim = embed_dim + self.out_channels = out_channels + self.prenorm = prenorm + patch_merging = None if patch_merging != 'Conv' and patch_merging != 'Pool' else patch_merging + self.patch_embed = PatchEmbed( + img_size=img_size, + in_channels=in_channels, + embed_dim=embed_dim[0], + sub_num=sub_num) + num_patches = self.patch_embed.num_patches + self.HW = [img_size[0] // (2**sub_num), img_size[1] // (2**sub_num)] + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0])) + # self.pos_embed = self.create_parameter( + # shape=[1, num_patches, embed_dim[0]], default_initializer=zeros_) + + # self.add_parameter("pos_embed", self.pos_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + Block_unit = eval(block_unit) + + dpr = np.linspace(0, drop_path_rate, sum(depth)) + self.blocks1 = nn.ModuleList( + [ + Block_unit( + dim=embed_dim[0], + num_heads=num_heads[0], + mixer=mixer[0:depth[0]][i], + HW=self.HW, + local_mixer=local_mixer[0], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[0:depth[0]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[0]) + ] + ) + if patch_merging is not None: + self.sub_sample1 = SubSample( + embed_dim[0], + embed_dim[1], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 2, self.HW[1]] + else: + HW = self.HW + self.patch_merging = 
patch_merging + self.blocks2 = nn.ModuleList([ + Block_unit( + dim=embed_dim[1], + num_heads=num_heads[1], + mixer=mixer[depth[0]:depth[0] + depth[1]][i], + HW=HW, + local_mixer=local_mixer[1], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0]:depth[0] + depth[1]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[1]) + ]) + if patch_merging is not None: + self.sub_sample2 = SubSample( + embed_dim[1], + embed_dim[2], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 4, self.HW[1]] + else: + HW = self.HW + self.blocks3 = nn.ModuleList([ + Block_unit( + dim=embed_dim[2], + num_heads=num_heads[2], + mixer=mixer[depth[0] + depth[1]:][i], + HW=HW, + local_mixer=local_mixer[2], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=eval(act), + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0] + depth[1]:][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[2]) + ]) + self.last_stage = last_stage + if last_stage: + self.avg_pool = nn.AdaptiveAvgPool2d((1, out_char_num)) + self.last_conv = nn.Conv2d( + in_channels=embed_dim[2], + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.hardswish = nn.Hardswish() + self.dropout = nn.Dropout(p=last_drop) + if not prenorm: + self.norm = eval(norm_layer)(embed_dim[-1], epsilon=epsilon) + self.use_lenhead = use_lenhead + if use_lenhead: + self.len_conv = nn.Linear(embed_dim[2], self.out_channels) + self.hardswish_len = nn.Hardswish() + self.dropout_len = nn.Dropout( + p=last_drop) + + trunc_normal_(self.pos_embed,std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight,std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def forward_features(self, x): + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks1: + x = blk(x) + if self.patch_merging is not None: + x = self.sub_sample1( + x.permute([0, 2, 1]).reshape( + [-1, self.embed_dim[0], self.HW[0], self.HW[1]])) + for blk in self.blocks2: + x = blk(x) + if self.patch_merging is not None: + x = self.sub_sample2( + x.permute([0, 2, 1]).reshape( + [-1, self.embed_dim[1], self.HW[0] // 2, self.HW[1]])) + for blk in self.blocks3: + x = blk(x) + if not self.prenorm: + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.use_lenhead: + len_x = self.len_conv(x.mean(1)) + len_x = self.dropout_len(self.hardswish_len(len_x)) + if self.last_stage: + if self.patch_merging is not None: + h = self.HW[0] // 4 + else: + h = self.HW[0] + x = self.avg_pool( + x.permute([0, 2, 1]).reshape( + [-1, self.embed_dim[2], h, self.HW[1]])) + x = self.last_conv(x) + x = self.hardswish(x) + x = self.dropout(x) + if self.use_lenhead: + return x, len_x + return x + + +if __name__=="__main__": + a = torch.rand(1,3,48,100) + svtr = SVTRNet() + + out = svtr(a) + print(svtr) + print(out.size()) \ No newline at end of file diff --git a/iopaint/model/anytext/ocr_recog/__init__.py b/iopaint/model/anytext/ocr_recog/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/anytext/ocr_recog/common.py b/iopaint/model/anytext/ocr_recog/common.py new file mode 100644 index 
0000000..a328bb0 --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/common.py @@ -0,0 +1,74 @@ + + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Hswish(nn.Module): + def __init__(self, inplace=True): + super(Hswish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x * F.relu6(x + 3., inplace=self.inplace) / 6. + +# out = max(0, min(1, slop*x+offset)) +# paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None) +class Hsigmoid(nn.Module): + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + # torch: F.relu6(x + 3., inplace=self.inplace) / 6. + # paddle: F.relu6(1.2 * x + 3., inplace=self.inplace) / 6. + return F.relu6(1.2 * x + 3., inplace=self.inplace) / 6. + +class GELU(nn.Module): + def __init__(self, inplace=True): + super(GELU, self).__init__() + self.inplace = inplace + + def forward(self, x): + return torch.nn.functional.gelu(x) + + +class Swish(nn.Module): + def __init__(self, inplace=True): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + if self.inplace: + x.mul_(torch.sigmoid(x)) + return x + else: + return x*torch.sigmoid(x) + + +class Activation(nn.Module): + def __init__(self, act_type, inplace=True): + super(Activation, self).__init__() + act_type = act_type.lower() + if act_type == 'relu': + self.act = nn.ReLU(inplace=inplace) + elif act_type == 'relu6': + self.act = nn.ReLU6(inplace=inplace) + elif act_type == 'sigmoid': + raise NotImplementedError + elif act_type == 'hard_sigmoid': + self.act = Hsigmoid(inplace) + elif act_type == 'hard_swish': + self.act = Hswish(inplace=inplace) + elif act_type == 'leakyrelu': + self.act = nn.LeakyReLU(inplace=inplace) + elif act_type == 'gelu': + self.act = GELU(inplace=inplace) + elif act_type == 'swish': + self.act = Swish(inplace=inplace) + else: + raise NotImplementedError + + def forward(self, inputs): + return self.act(inputs) \ No newline at end of file diff --git a/iopaint/model/anytext/ocr_recog/en_dict.txt b/iopaint/model/anytext/ocr_recog/en_dict.txt new file mode 100644 index 0000000..7677d31 --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/en_dict.txt @@ -0,0 +1,95 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ + diff --git a/iopaint/model/anytext/ocr_recog/ppocr_keys_v1.txt b/iopaint/model/anytext/ocr_recog/ppocr_keys_v1.txt new file mode 100644 index 0000000..84b885d --- /dev/null +++ b/iopaint/model/anytext/ocr_recog/ppocr_keys_v1.txt @@ -0,0 +1,6623 @@ +' +疗 +绚 +诚 +娇 +溜 +题 +贿 +者 +廖 +更 +纳 +加 +奉 +公 +一 +就 +汴 +计 +与 +路 +房 +原 +妇 +2 +0 +8 +- +7 +其 +> +: +] +, +, +骑 +刈 +全 +消 +昏 +傈 +安 +久 +钟 +嗅 +不 +影 +处 +驽 +蜿 +资 +关 +椤 +地 +瘸 +专 +问 +忖 +票 +嫉 +炎 +韵 +要 +月 +田 +节 +陂 +鄙 +捌 +备 +拳 +伺 +眼 +网 +盎 +大 +傍 +心 +东 +愉 +汇 +蹿 +科 +每 +业 +里 +航 +晏 +字 +平 +录 +先 +1 +3 +彤 +鲶 +产 +稍 +督 +腴 +有 +象 +岳 +注 +绍 +在 +泺 +文 +定 +核 +名 +水 +过 +理 +让 +偷 +率 +等 +这 +发 +” +为 +含 +肥 +酉 +相 +鄱 +七 +编 +猥 +锛 +日 +镀 +蒂 +掰 +倒 +辆 +栾 +栗 +综 +涩 +州 +雌 +滑 +馀 +了 +机 +块 +司 +宰 +甙 +兴 +矽 +抚 +保 +用 +沧 +秩 +如 +收 +息 +滥 +页 +疑 +埠 +! +! 
+姥 +异 +橹 +钇 +向 +下 +跄 +的 +椴 +沫 +国 +绥 +獠 +报 +开 +民 +蜇 +何 +分 +凇 +长 +讥 +藏 +掏 +施 +羽 +中 +讲 +派 +嘟 +人 +提 +浼 +间 +世 +而 +古 +多 +倪 +唇 +饯 +控 +庚 +首 +赛 +蜓 +味 +断 +制 +觉 +技 +替 +艰 +溢 +潮 +夕 +钺 +外 +摘 +枋 +动 +双 +单 +啮 +户 +枇 +确 +锦 +曜 +杜 +或 +能 +效 +霜 +盒 +然 +侗 +电 +晁 +放 +步 +鹃 +新 +杖 +蜂 +吒 +濂 +瞬 +评 +总 +隍 +对 +独 +合 +也 +是 +府 +青 +天 +诲 +墙 +组 +滴 +级 +邀 +帘 +示 +已 +时 +骸 +仄 +泅 +和 +遨 +店 +雇 +疫 +持 +巍 +踮 +境 +只 +亨 +目 +鉴 +崤 +闲 +体 +泄 +杂 +作 +般 +轰 +化 +解 +迂 +诿 +蛭 +璀 +腾 +告 +版 +服 +省 +师 +小 +规 +程 +线 +海 +办 +引 +二 +桧 +牌 +砺 +洄 +裴 +修 +图 +痫 +胡 +许 +犊 +事 +郛 +基 +柴 +呼 +食 +研 +奶 +律 +蛋 +因 +葆 +察 +戏 +褒 +戒 +再 +李 +骁 +工 +貂 +油 +鹅 +章 +啄 +休 +场 +给 +睡 +纷 +豆 +器 +捎 +说 +敏 +学 +会 +浒 +设 +诊 +格 +廓 +查 +来 +霓 +室 +溆 +¢ +诡 +寥 +焕 +舜 +柒 +狐 +回 +戟 +砾 +厄 +实 +翩 +尿 +五 +入 +径 +惭 +喹 +股 +宇 +篝 +| +; +美 +期 +云 +九 +祺 +扮 +靠 +锝 +槌 +系 +企 +酰 +阊 +暂 +蚕 +忻 +豁 +本 +羹 +执 +条 +钦 +H +獒 +限 +进 +季 +楦 +于 +芘 +玖 +铋 +茯 +未 +答 +粘 +括 +样 +精 +欠 +矢 +甥 +帷 +嵩 +扣 +令 +仔 +风 +皈 +行 +支 +部 +蓉 +刮 +站 +蜡 +救 +钊 +汗 +松 +嫌 +成 +可 +. +鹤 +院 +从 +交 +政 +怕 +活 +调 +球 +局 +验 +髌 +第 +韫 +谗 +串 +到 +圆 +年 +米 +/ +* +友 +忿 +检 +区 +看 +自 +敢 +刃 +个 +兹 +弄 +流 +留 +同 +没 +齿 +星 +聆 +轼 +湖 +什 +三 +建 +蛔 +儿 +椋 +汕 +震 +颧 +鲤 +跟 +力 +情 +璺 +铨 +陪 +务 +指 +族 +训 +滦 +鄣 +濮 +扒 +商 +箱 +十 +召 +慷 +辗 +所 +莞 +管 +护 +臭 +横 +硒 +嗓 +接 +侦 +六 +露 +党 +馋 +驾 +剖 +高 +侬 +妪 +幂 +猗 +绺 +骐 +央 +酐 +孝 +筝 +课 +徇 +缰 +门 +男 +西 +项 +句 +谙 +瞒 +秃 +篇 +教 +碲 +罚 +声 +呐 +景 +前 +富 +嘴 +鳌 +稀 +免 +朋 +啬 +睐 +去 +赈 +鱼 +住 +肩 +愕 +速 +旁 +波 +厅 +健 +茼 +厥 +鲟 +谅 +投 +攸 +炔 +数 +方 +击 +呋 +谈 +绩 +别 +愫 +僚 +躬 +鹧 +胪 +炳 +招 +喇 +膨 +泵 +蹦 +毛 +结 +5 +4 +谱 +识 +陕 +粽 +婚 +拟 +构 +且 +搜 +任 +潘 +比 +郢 +妨 +醪 +陀 +桔 +碘 +扎 +选 +哈 +骷 +楷 +亿 +明 +缆 +脯 +监 +睫 +逻 +婵 +共 +赴 +淝 +凡 +惦 +及 +达 +揖 +谩 +澹 +减 +焰 +蛹 +番 +祁 +柏 +员 +禄 +怡 +峤 +龙 +白 +叽 +生 +闯 +起 +细 +装 +谕 +竟 +聚 +钙 +上 +导 +渊 +按 +艾 +辘 +挡 +耒 +盹 +饪 +臀 +记 +邮 +蕙 +受 +各 +医 +搂 +普 +滇 +朗 +茸 +带 +翻 +酚 +( +光 +堤 +墟 +蔷 +万 +幻 +〓 +瑙 +辈 +昧 +盏 +亘 +蛀 +吉 +铰 +请 +子 +假 +闻 +税 +井 +诩 +哨 +嫂 +好 +面 +琐 +校 +馊 +鬣 +缂 +营 +访 +炖 +占 +农 +缀 +否 +经 +钚 +棵 +趟 +张 +亟 +吏 +茶 +谨 +捻 +论 +迸 +堂 +玉 +信 +吧 +瞠 +乡 +姬 +寺 +咬 +溏 +苄 +皿 +意 +赉 +宝 +尔 +钰 +艺 +特 +唳 +踉 +都 +荣 +倚 +登 +荐 +丧 +奇 +涵 +批 +炭 +近 +符 +傩 +感 +道 +着 +菊 +虹 +仲 +众 +懈 +濯 +颞 +眺 +南 +释 +北 +缝 +标 +既 +茗 +整 +撼 +迤 +贲 +挎 +耱 +拒 +某 +妍 +卫 +哇 +英 +矶 +藩 +治 +他 +元 +领 +膜 +遮 +穗 +蛾 +飞 +荒 +棺 +劫 +么 +市 +火 +温 +拈 +棚 +洼 +转 +果 +奕 +卸 +迪 +伸 +泳 +斗 +邡 +侄 +涨 +屯 +萋 +胭 +氡 +崮 +枞 +惧 +冒 +彩 +斜 +手 +豚 +随 +旭 +淑 +妞 +形 +菌 +吲 +沱 +争 +驯 +歹 +挟 +兆 +柱 +传 +至 +包 +内 +响 +临 +红 +功 +弩 +衡 +寂 +禁 +老 +棍 +耆 +渍 +织 +害 +氵 +渑 +布 +载 +靥 +嗬 +虽 +苹 +咨 +娄 +库 +雉 +榜 +帜 +嘲 +套 +瑚 +亲 +簸 +欧 +边 +6 +腿 +旮 +抛 +吹 +瞳 +得 +镓 +梗 +厨 +继 +漾 +愣 +憨 +士 +策 +窑 +抑 +躯 +襟 +脏 +参 +贸 +言 +干 +绸 +鳄 +穷 +藜 +音 +折 +详 +) +举 +悍 +甸 +癌 +黎 +谴 +死 +罩 +迁 +寒 +驷 +袖 +媒 +蒋 +掘 +模 +纠 +恣 +观 +祖 +蛆 +碍 +位 +稿 +主 +澧 +跌 +筏 +京 +锏 +帝 +贴 +证 +糠 +才 +黄 +鲸 +略 +炯 +饱 +四 +出 +园 +犀 +牧 +容 +汉 +杆 +浈 +汰 +瑷 +造 +虫 +瘩 +怪 +驴 +济 +应 +花 +沣 +谔 +夙 +旅 +价 +矿 +以 +考 +s +u +呦 +晒 +巡 +茅 +准 +肟 +瓴 +詹 +仟 +褂 +译 +桌 +混 +宁 +怦 +郑 +抿 +些 +余 +鄂 +饴 +攒 +珑 +群 +阖 +岔 +琨 +藓 +预 +环 +洮 +岌 +宀 +杲 +瀵 +最 +常 +囡 +周 +踊 +女 +鼓 +袭 +喉 +简 +范 +薯 +遐 +疏 +粱 +黜 +禧 +法 +箔 +斤 +遥 +汝 +奥 +直 +贞 +撑 +置 +绱 +集 +她 +馅 +逗 +钧 +橱 +魉 +[ +恙 +躁 +唤 +9 +旺 +膘 +待 +脾 +惫 +购 +吗 +依 +盲 +度 +瘿 +蠖 +俾 +之 +镗 +拇 +鲵 +厝 +簧 +续 +款 +展 +啃 +表 +剔 +品 +钻 +腭 +损 +清 +锶 +统 +涌 +寸 +滨 +贪 +链 +吠 +冈 +伎 +迥 +咏 +吁 +览 +防 +迅 +失 +汾 +阔 +逵 +绀 +蔑 +列 +川 +凭 +努 +熨 +揪 +利 +俱 +绉 +抢 +鸨 +我 +即 +责 +膦 +易 +毓 +鹊 +刹 +玷 +岿 +空 +嘞 +绊 +排 +术 +估 +锷 +违 +们 +苟 +铜 +播 +肘 +件 +烫 +审 +鲂 +广 +像 +铌 +惰 +铟 +巳 +胍 +鲍 +康 +憧 +色 +恢 +想 +拷 +尤 +疳 +知 +S +Y +F +D +A +峄 +裕 +帮 +握 +搔 +氐 +氘 +难 +墒 +沮 +雨 +叁 +缥 +悴 +藐 +湫 +娟 +苑 +稠 +颛 +簇 +后 +阕 +闭 +蕤 +缚 +怎 +佞 +码 +嘤 +蔡 +痊 +舱 +螯 +帕 +赫 +昵 +升 +烬 +岫 +、 +疵 +蜻 +髁 +蕨 +隶 +烛 +械 +丑 +盂 +梁 +强 +鲛 +由 +拘 +揉 +劭 +龟 +撤 +钩 +呕 +孛 +费 +妻 +漂 +求 +阑 +崖 +秤 +甘 +通 +深 +补 +赃 +坎 +床 +啪 +承 +吼 +量 +暇 +钼 +烨 +阂 +擎 +脱 +逮 +称 +P +神 +属 +矗 +华 +届 +狍 +葑 +汹 +育 +患 +窒 +蛰 +佼 +静 +槎 +运 +鳗 +庆 +逝 +曼 +疱 +克 +代 +官 +此 +麸 +耧 +蚌 +晟 +例 +础 +榛 +副 +测 +唰 +缢 +迹 +灬 +霁 +身 +岁 +赭 
+扛 +又 +菡 +乜 +雾 +板 +读 +陷 +徉 +贯 +郁 +虑 +变 +钓 +菜 +圾 +现 +琢 +式 +乐 +维 +渔 +浜 +左 +吾 +脑 +钡 +警 +T +啵 +拴 +偌 +漱 +湿 +硕 +止 +骼 +魄 +积 +燥 +联 +踢 +玛 +则 +窿 +见 +振 +畿 +送 +班 +钽 +您 +赵 +刨 +印 +讨 +踝 +籍 +谡 +舌 +崧 +汽 +蔽 +沪 +酥 +绒 +怖 +财 +帖 +肱 +私 +莎 +勋 +羔 +霸 +励 +哼 +帐 +将 +帅 +渠 +纪 +婴 +娩 +岭 +厘 +滕 +吻 +伤 +坝 +冠 +戊 +隆 +瘁 +介 +涧 +物 +黍 +并 +姗 +奢 +蹑 +掣 +垸 +锴 +命 +箍 +捉 +病 +辖 +琰 +眭 +迩 +艘 +绌 +繁 +寅 +若 +毋 +思 +诉 +类 +诈 +燮 +轲 +酮 +狂 +重 +反 +职 +筱 +县 +委 +磕 +绣 +奖 +晋 +濉 +志 +徽 +肠 +呈 +獐 +坻 +口 +片 +碰 +几 +村 +柿 +劳 +料 +获 +亩 +惕 +晕 +厌 +号 +罢 +池 +正 +鏖 +煨 +家 +棕 +复 +尝 +懋 +蜥 +锅 +岛 +扰 +队 +坠 +瘾 +钬 +@ +卧 +疣 +镇 +譬 +冰 +彷 +频 +黯 +据 +垄 +采 +八 +缪 +瘫 +型 +熹 +砰 +楠 +襁 +箐 +但 +嘶 +绳 +啤 +拍 +盥 +穆 +傲 +洗 +盯 +塘 +怔 +筛 +丿 +台 +恒 +喂 +葛 +永 +¥ +烟 +酒 +桦 +书 +砂 +蚝 +缉 +态 +瀚 +袄 +圳 +轻 +蛛 +超 +榧 +遛 +姒 +奘 +铮 +右 +荽 +望 +偻 +卡 +丶 +氰 +附 +做 +革 +索 +戚 +坨 +桷 +唁 +垅 +榻 +岐 +偎 +坛 +莨 +山 +殊 +微 +骇 +陈 +爨 +推 +嗝 +驹 +澡 +藁 +呤 +卤 +嘻 +糅 +逛 +侵 +郓 +酌 +德 +摇 +※ +鬃 +被 +慨 +殡 +羸 +昌 +泡 +戛 +鞋 +河 +宪 +沿 +玲 +鲨 +翅 +哽 +源 +铅 +语 +照 +邯 +址 +荃 +佬 +顺 +鸳 +町 +霭 +睾 +瓢 +夸 +椁 +晓 +酿 +痈 +咔 +侏 +券 +噎 +湍 +签 +嚷 +离 +午 +尚 +社 +锤 +背 +孟 +使 +浪 +缦 +潍 +鞅 +军 +姹 +驶 +笑 +鳟 +鲁 +》 +孽 +钜 +绿 +洱 +礴 +焯 +椰 +颖 +囔 +乌 +孔 +巴 +互 +性 +椽 +哞 +聘 +昨 +早 +暮 +胶 +炀 +隧 +低 +彗 +昝 +铁 +呓 +氽 +藉 +喔 +癖 +瑗 +姨 +权 +胱 +韦 +堑 +蜜 +酋 +楝 +砝 +毁 +靓 +歙 +锲 +究 +屋 +喳 +骨 +辨 +碑 +武 +鸠 +宫 +辜 +烊 +适 +坡 +殃 +培 +佩 +供 +走 +蜈 +迟 +翼 +况 +姣 +凛 +浔 +吃 +飘 +债 +犟 +金 +促 +苛 +崇 +坂 +莳 +畔 +绂 +兵 +蠕 +斋 +根 +砍 +亢 +欢 +恬 +崔 +剁 +餐 +榫 +快 +扶 +‖ +濒 +缠 +鳜 +当 +彭 +驭 +浦 +篮 +昀 +锆 +秸 +钳 +弋 +娣 +瞑 +夷 +龛 +苫 +拱 +致 +% +嵊 +障 +隐 +弑 +初 +娓 +抉 +汩 +累 +蓖 +" +唬 +助 +苓 +昙 +押 +毙 +破 +城 +郧 +逢 +嚏 +獭 +瞻 +溱 +婿 +赊 +跨 +恼 +璧 +萃 +姻 +貉 +灵 +炉 +密 +氛 +陶 +砸 +谬 +衔 +点 +琛 +沛 +枳 +层 +岱 +诺 +脍 +榈 +埂 +征 +冷 +裁 +打 +蹴 +素 +瘘 +逞 +蛐 +聊 +激 +腱 +萘 +踵 +飒 +蓟 +吆 +取 +咙 +簋 +涓 +矩 +曝 +挺 +揣 +座 +你 +史 +舵 +焱 +尘 +苏 +笈 +脚 +溉 +榨 +诵 +樊 +邓 +焊 +义 +庶 +儋 +蟋 +蒲 +赦 +呷 +杞 +诠 +豪 +还 +试 +颓 +茉 +太 +除 +紫 +逃 +痴 +草 +充 +鳕 +珉 +祗 +墨 +渭 +烩 +蘸 +慕 +璇 +镶 +穴 +嵘 +恶 +骂 +险 +绋 +幕 +碉 +肺 +戳 +刘 +潞 +秣 +纾 +潜 +銮 +洛 +须 +罘 +销 +瘪 +汞 +兮 +屉 +r +林 +厕 +质 +探 +划 +狸 +殚 +善 +煊 +烹 +〒 +锈 +逯 +宸 +辍 +泱 +柚 +袍 +远 +蹋 +嶙 +绝 +峥 +娥 +缍 +雀 +徵 +认 +镱 +谷 += +贩 +勉 +撩 +鄯 +斐 +洋 +非 +祚 +泾 +诒 +饿 +撬 +威 +晷 +搭 +芍 +锥 +笺 +蓦 +候 +琊 +档 +礁 +沼 +卵 +荠 +忑 +朝 +凹 +瑞 +头 +仪 +弧 +孵 +畏 +铆 +突 +衲 +车 +浩 +气 +茂 +悖 +厢 +枕 +酝 +戴 +湾 +邹 +飚 +攘 +锂 +写 +宵 +翁 +岷 +无 +喜 +丈 +挑 +嗟 +绛 +殉 +议 +槽 +具 +醇 +淞 +笃 +郴 +阅 +饼 +底 +壕 +砚 +弈 +询 +缕 +庹 +翟 +零 +筷 +暨 +舟 +闺 +甯 +撞 +麂 +茌 +蔼 +很 +珲 +捕 +棠 +角 +阉 +媛 +娲 +诽 +剿 +尉 +爵 +睬 +韩 +诰 +匣 +危 +糍 +镯 +立 +浏 +阳 +少 +盆 +舔 +擘 +匪 +申 +尬 +铣 +旯 +抖 +赘 +瓯 +居 +ˇ +哮 +游 +锭 +茏 +歌 +坏 +甚 +秒 +舞 +沙 +仗 +劲 +潺 +阿 +燧 +郭 +嗖 +霏 +忠 +材 +奂 +耐 +跺 +砀 +输 +岖 +媳 +氟 +极 +摆 +灿 +今 +扔 +腻 +枝 +奎 +药 +熄 +吨 +话 +q +额 +慑 +嘌 +协 +喀 +壳 +埭 +视 +著 +於 +愧 +陲 +翌 +峁 +颅 +佛 +腹 +聋 +侯 +咎 +叟 +秀 +颇 +存 +较 +罪 +哄 +岗 +扫 +栏 +钾 +羌 +己 +璨 +枭 +霉 +煌 +涸 +衿 +键 +镝 +益 +岢 +奏 +连 +夯 +睿 +冥 +均 +糖 +狞 +蹊 +稻 +爸 +刿 +胥 +煜 +丽 +肿 +璃 +掸 +跚 +灾 +垂 +樾 +濑 +乎 +莲 +窄 +犹 +撮 +战 +馄 +软 +络 +显 +鸢 +胸 +宾 +妲 +恕 +埔 +蝌 +份 +遇 +巧 +瞟 +粒 +恰 +剥 +桡 +博 +讯 +凯 +堇 +阶 +滤 +卖 +斌 +骚 +彬 +兑 +磺 +樱 +舷 +两 +娱 +福 +仃 +差 +找 +桁 +÷ +净 +把 +阴 +污 +戬 +雷 +碓 +蕲 +楚 +罡 +焖 +抽 +妫 +咒 +仑 +闱 +尽 +邑 +菁 +爱 +贷 +沥 +鞑 +牡 +嗉 +崴 +骤 +塌 +嗦 +订 +拮 +滓 +捡 +锻 +次 +坪 +杩 +臃 +箬 +融 +珂 +鹗 +宗 +枚 +降 +鸬 +妯 +阄 +堰 +盐 +毅 +必 +杨 +崃 +俺 +甬 +状 +莘 +货 +耸 +菱 +腼 +铸 +唏 +痤 +孚 +澳 +懒 +溅 +翘 +疙 +杷 +淼 +缙 +骰 +喊 +悉 +砻 +坷 +艇 +赁 +界 +谤 +纣 +宴 +晃 +茹 +归 +饭 +梢 +铡 +街 +抄 +肼 +鬟 +苯 +颂 +撷 +戈 +炒 +咆 +茭 +瘙 +负 +仰 +客 +琉 +铢 +封 +卑 +珥 +椿 +镧 +窨 +鬲 +寿 +御 +袤 +铃 +萎 +砖 +餮 +脒 +裳 +肪 +孕 +嫣 +馗 +嵇 +恳 +氯 +江 +石 +褶 +冢 +祸 +阻 +狈 +羞 +银 +靳 +透 +咳 +叼 +敷 +芷 +啥 +它 +瓤 +兰 +痘 +懊 +逑 +肌 +往 +捺 +坊 +甩 +呻 +〃 +沦 +忘 +膻 +祟 +菅 +剧 +崆 +智 +坯 +臧 +霍 +墅 +攻 +眯 +倘 +拢 +骠 +铐 +庭 +岙 +瓠 +′ +缺 +泥 +迢 +捶 +? +? 
+郏 +喙 +掷 +沌 +纯 +秘 +种 +听 +绘 +固 +螨 +团 +香 +盗 +妒 +埚 +蓝 +拖 +旱 +荞 +铀 +血 +遏 +汲 +辰 +叩 +拽 +幅 +硬 +惶 +桀 +漠 +措 +泼 +唑 +齐 +肾 +念 +酱 +虚 +屁 +耶 +旗 +砦 +闵 +婉 +馆 +拭 +绅 +韧 +忏 +窝 +醋 +葺 +顾 +辞 +倜 +堆 +辋 +逆 +玟 +贱 +疾 +董 +惘 +倌 +锕 +淘 +嘀 +莽 +俭 +笏 +绑 +鲷 +杈 +择 +蟀 +粥 +嗯 +驰 +逾 +案 +谪 +褓 +胫 +哩 +昕 +颚 +鲢 +绠 +躺 +鹄 +崂 +儒 +俨 +丝 +尕 +泌 +啊 +萸 +彰 +幺 +吟 +骄 +苣 +弦 +脊 +瑰 +〈 +诛 +镁 +析 +闪 +剪 +侧 +哟 +框 +螃 +守 +嬗 +燕 +狭 +铈 +缮 +概 +迳 +痧 +鲲 +俯 +售 +笼 +痣 +扉 +挖 +满 +咋 +援 +邱 +扇 +歪 +便 +玑 +绦 +峡 +蛇 +叨 +〖 +泽 +胃 +斓 +喋 +怂 +坟 +猪 +该 +蚬 +炕 +弥 +赞 +棣 +晔 +娠 +挲 +狡 +创 +疖 +铕 +镭 +稷 +挫 +弭 +啾 +翔 +粉 +履 +苘 +哦 +楼 +秕 +铂 +土 +锣 +瘟 +挣 +栉 +习 +享 +桢 +袅 +磨 +桂 +谦 +延 +坚 +蔚 +噗 +署 +谟 +猬 +钎 +恐 +嬉 +雒 +倦 +衅 +亏 +璩 +睹 +刻 +殿 +王 +算 +雕 +麻 +丘 +柯 +骆 +丸 +塍 +谚 +添 +鲈 +垓 +桎 +蚯 +芥 +予 +飕 +镦 +谌 +窗 +醚 +菀 +亮 +搪 +莺 +蒿 +羁 +足 +J +真 +轶 +悬 +衷 +靛 +翊 +掩 +哒 +炅 +掐 +冼 +妮 +l +谐 +稚 +荆 +擒 +犯 +陵 +虏 +浓 +崽 +刍 +陌 +傻 +孜 +千 +靖 +演 +矜 +钕 +煽 +杰 +酗 +渗 +伞 +栋 +俗 +泫 +戍 +罕 +沾 +疽 +灏 +煦 +芬 +磴 +叱 +阱 +榉 +湃 +蜀 +叉 +醒 +彪 +租 +郡 +篷 +屎 +良 +垢 +隗 +弱 +陨 +峪 +砷 +掴 +颁 +胎 +雯 +绵 +贬 +沐 +撵 +隘 +篙 +暖 +曹 +陡 +栓 +填 +臼 +彦 +瓶 +琪 +潼 +哪 +鸡 +摩 +啦 +俟 +锋 +域 +耻 +蔫 +疯 +纹 +撇 +毒 +绶 +痛 +酯 +忍 +爪 +赳 +歆 +嘹 +辕 +烈 +册 +朴 +钱 +吮 +毯 +癜 +娃 +谀 +邵 +厮 +炽 +璞 +邃 +丐 +追 +词 +瓒 +忆 +轧 +芫 +谯 +喷 +弟 +半 +冕 +裙 +掖 +墉 +绮 +寝 +苔 +势 +顷 +褥 +切 +衮 +君 +佳 +嫒 +蚩 +霞 +佚 +洙 +逊 +镖 +暹 +唛 +& +殒 +顶 +碗 +獗 +轭 +铺 +蛊 +废 +恹 +汨 +崩 +珍 +那 +杵 +曲 +纺 +夏 +薰 +傀 +闳 +淬 +姘 +舀 +拧 +卷 +楂 +恍 +讪 +厩 +寮 +篪 +赓 +乘 +灭 +盅 +鞣 +沟 +慎 +挂 +饺 +鼾 +杳 +树 +缨 +丛 +絮 +娌 +臻 +嗳 +篡 +侩 +述 +衰 +矛 +圈 +蚜 +匕 +筹 +匿 +濞 +晨 +叶 +骋 +郝 +挚 +蚴 +滞 +增 +侍 +描 +瓣 +吖 +嫦 +蟒 +匾 +圣 +赌 +毡 +癞 +恺 +百 +曳 +需 +篓 +肮 +庖 +帏 +卿 +驿 +遗 +蹬 +鬓 +骡 +歉 +芎 +胳 +屐 +禽 +烦 +晌 +寄 +媾 +狄 +翡 +苒 +船 +廉 +终 +痞 +殇 +々 +畦 +饶 +改 +拆 +悻 +萄 +£ +瓿 +乃 +訾 +桅 +匮 +溧 +拥 +纱 +铍 +骗 +蕃 +龋 +缬 +父 +佐 +疚 +栎 +醍 +掳 +蓄 +x +惆 +颜 +鲆 +榆 +〔 +猎 +敌 +暴 +谥 +鲫 +贾 +罗 +玻 +缄 +扦 +芪 +癣 +落 +徒 +臾 +恿 +猩 +托 +邴 +肄 +牵 +春 +陛 +耀 +刊 +拓 +蓓 +邳 +堕 +寇 +枉 +淌 +啡 +湄 +兽 +酷 +萼 +碚 +濠 +萤 +夹 +旬 +戮 +梭 +琥 +椭 +昔 +勺 +蜊 +绐 +晚 +孺 +僵 +宣 +摄 +冽 +旨 +萌 +忙 +蚤 +眉 +噼 +蟑 +付 +契 +瓜 +悼 +颡 +壁 +曾 +窕 +颢 +澎 +仿 +俑 +浑 +嵌 +浣 +乍 +碌 +褪 +乱 +蔟 +隙 +玩 +剐 +葫 +箫 +纲 +围 +伐 +决 +伙 +漩 +瑟 +刑 +肓 +镳 +缓 +蹭 +氨 +皓 +典 +畲 +坍 +铑 +檐 +塑 +洞 +倬 +储 +胴 +淳 +戾 +吐 +灼 +惺 +妙 +毕 +珐 +缈 +虱 +盖 +羰 +鸿 +磅 +谓 +髅 +娴 +苴 +唷 +蚣 +霹 +抨 +贤 +唠 +犬 +誓 +逍 +庠 +逼 +麓 +籼 +釉 +呜 +碧 +秧 +氩 +摔 +霄 +穸 +纨 +辟 +妈 +映 +完 +牛 +缴 +嗷 +炊 +恩 +荔 +茆 +掉 +紊 +慌 +莓 +羟 +阙 +萁 +磐 +另 +蕹 +辱 +鳐 +湮 +吡 +吩 +唐 +睦 +垠 +舒 +圜 +冗 +瞿 +溺 +芾 +囱 +匠 +僳 +汐 +菩 +饬 +漓 +黑 +霰 +浸 +濡 +窥 +毂 +蒡 +兢 +驻 +鹉 +芮 +诙 +迫 +雳 +厂 +忐 +臆 +猴 +鸣 +蚪 +栈 +箕 +羡 +渐 +莆 +捍 +眈 +哓 +趴 +蹼 +埕 +嚣 +骛 +宏 +淄 +斑 +噜 +严 +瑛 +垃 +椎 +诱 +压 +庾 +绞 +焘 +廿 +抡 +迄 +棘 +夫 +纬 +锹 +眨 +瞌 +侠 +脐 +竞 +瀑 +孳 +骧 +遁 +姜 +颦 +荪 +滚 +萦 +伪 +逸 +粳 +爬 +锁 +矣 +役 +趣 +洒 +颔 +诏 +逐 +奸 +甭 +惠 +攀 +蹄 +泛 +尼 +拼 +阮 +鹰 +亚 +颈 +惑 +勒 +〉 +际 +肛 +爷 +刚 +钨 +丰 +养 +冶 +鲽 +辉 +蔻 +画 +覆 +皴 +妊 +麦 +返 +醉 +皂 +擀 +〗 +酶 +凑 +粹 +悟 +诀 +硖 +港 +卜 +z +杀 +涕 +± +舍 +铠 +抵 +弛 +段 +敝 +镐 +奠 +拂 +轴 +跛 +袱 +e +t +沉 +菇 +俎 +薪 +峦 +秭 +蟹 +历 +盟 +菠 +寡 +液 +肢 +喻 +染 +裱 +悱 +抱 +氙 +赤 +捅 +猛 +跑 +氮 +谣 +仁 +尺 +辊 +窍 +烙 +衍 +架 +擦 +倏 +璐 +瑁 +币 +楞 +胖 +夔 +趸 +邛 +惴 +饕 +虔 +蝎 +§ +哉 +贝 +宽 +辫 +炮 +扩 +饲 +籽 +魏 +菟 +锰 +伍 +猝 +末 +琳 +哚 +蛎 +邂 +呀 +姿 +鄞 +却 +歧 +仙 +恸 +椐 +森 +牒 +寤 +袒 +婆 +虢 +雅 +钉 +朵 +贼 +欲 +苞 +寰 +故 +龚 +坭 +嘘 +咫 +礼 +硷 +兀 +睢 +汶 +’ +铲 +烧 +绕 +诃 +浃 +钿 +哺 +柜 +讼 +颊 +璁 +腔 +洽 +咐 +脲 +簌 +筠 +镣 +玮 +鞠 +谁 +兼 +姆 +挥 +梯 +蝴 +谘 +漕 +刷 +躏 +宦 +弼 +b +垌 +劈 +麟 +莉 +揭 +笙 +渎 +仕 +嗤 +仓 +配 +怏 +抬 +错 +泯 +镊 +孰 +猿 +邪 +仍 +秋 +鼬 +壹 +歇 +吵 +炼 +< +尧 +射 +柬 +廷 +胧 +霾 +凳 +隋 +肚 +浮 +梦 +祥 +株 +堵 +退 +L +鹫 +跎 +凶 +毽 +荟 +炫 +栩 +玳 +甜 +沂 +鹿 +顽 +伯 +爹 +赔 +蛴 +徐 +匡 +欣 +狰 +缸 +雹 +蟆 +疤 +默 +沤 +啜 +痂 +衣 +禅 +w +i +h +辽 +葳 +黝 +钗 +停 +沽 +棒 +馨 +颌 +肉 +吴 +硫 +悯 +劾 +娈 +马 +啧 +吊 +悌 +镑 +峭 +帆 +瀣 +涉 +咸 +疸 +滋 +泣 +翦 +拙 +癸 +钥 +蜒 ++ +尾 +庄 +凝 +泉 +婢 +渴 +谊 +乞 +陆 +锉 +糊 +鸦 +淮 +I +B +N +晦 +弗 +乔 +庥 +葡 +尻 +席 +橡 +傣 +渣 +拿 +惩 +麋 +斛 +缃 +矮 +蛏 +岘 +鸽 +姐 +膏 +催 +奔 +镒 +喱 +蠡 +摧 +钯 +胤 +柠 +拐 +璋 +鸥 +卢 +荡 +倾 +^ +_ +珀 +逄 +萧 +塾 +掇 +贮 +笆 +聂 +圃 +冲 +嵬 +M +滔 +笕 +值 
+炙 +偶 +蜱 +搐 +梆 +汪 +蔬 +腑 +鸯 +蹇 +敞 +绯 +仨 +祯 +谆 +梧 +糗 +鑫 +啸 +豺 +囹 +猾 +巢 +柄 +瀛 +筑 +踌 +沭 +暗 +苁 +鱿 +蹉 +脂 +蘖 +牢 +热 +木 +吸 +溃 +宠 +序 +泞 +偿 +拜 +檩 +厚 +朐 +毗 +螳 +吞 +媚 +朽 +担 +蝗 +橘 +畴 +祈 +糟 +盱 +隼 +郜 +惜 +珠 +裨 +铵 +焙 +琚 +唯 +咚 +噪 +骊 +丫 +滢 +勤 +棉 +呸 +咣 +淀 +隔 +蕾 +窈 +饨 +挨 +煅 +短 +匙 +粕 +镜 +赣 +撕 +墩 +酬 +馁 +豌 +颐 +抗 +酣 +氓 +佑 +搁 +哭 +递 +耷 +涡 +桃 +贻 +碣 +截 +瘦 +昭 +镌 +蔓 +氚 +甲 +猕 +蕴 +蓬 +散 +拾 +纛 +狼 +猷 +铎 +埋 +旖 +矾 +讳 +囊 +糜 +迈 +粟 +蚂 +紧 +鲳 +瘢 +栽 +稼 +羊 +锄 +斟 +睁 +桥 +瓮 +蹙 +祉 +醺 +鼻 +昱 +剃 +跳 +篱 +跷 +蒜 +翎 +宅 +晖 +嗑 +壑 +峻 +癫 +屏 +狠 +陋 +袜 +途 +憎 +祀 +莹 +滟 +佶 +溥 +臣 +约 +盛 +峰 +磁 +慵 +婪 +拦 +莅 +朕 +鹦 +粲 +裤 +哎 +疡 +嫖 +琵 +窟 +堪 +谛 +嘉 +儡 +鳝 +斩 +郾 +驸 +酊 +妄 +胜 +贺 +徙 +傅 +噌 +钢 +栅 +庇 +恋 +匝 +巯 +邈 +尸 +锚 +粗 +佟 +蛟 +薹 +纵 +蚊 +郅 +绢 +锐 +苗 +俞 +篆 +淆 +膀 +鲜 +煎 +诶 +秽 +寻 +涮 +刺 +怀 +噶 +巨 +褰 +魅 +灶 +灌 +桉 +藕 +谜 +舸 +薄 +搀 +恽 +借 +牯 +痉 +渥 +愿 +亓 +耘 +杠 +柩 +锔 +蚶 +钣 +珈 +喘 +蹒 +幽 +赐 +稗 +晤 +莱 +泔 +扯 +肯 +菪 +裆 +腩 +豉 +疆 +骜 +腐 +倭 +珏 +唔 +粮 +亡 +润 +慰 +伽 +橄 +玄 +誉 +醐 +胆 +龊 +粼 +塬 +陇 +彼 +削 +嗣 +绾 +芽 +妗 +垭 +瘴 +爽 +薏 +寨 +龈 +泠 +弹 +赢 +漪 +猫 +嘧 +涂 +恤 +圭 +茧 +烽 +屑 +痕 +巾 +赖 +荸 +凰 +腮 +畈 +亵 +蹲 +偃 +苇 +澜 +艮 +换 +骺 +烘 +苕 +梓 +颉 +肇 +哗 +悄 +氤 +涠 +葬 +屠 +鹭 +植 +竺 +佯 +诣 +鲇 +瘀 +鲅 +邦 +移 +滁 +冯 +耕 +癔 +戌 +茬 +沁 +巩 +悠 +湘 +洪 +痹 +锟 +循 +谋 +腕 +鳃 +钠 +捞 +焉 +迎 +碱 +伫 +急 +榷 +奈 +邝 +卯 +辄 +皲 +卟 +醛 +畹 +忧 +稳 +雄 +昼 +缩 +阈 +睑 +扌 +耗 +曦 +涅 +捏 +瞧 +邕 +淖 +漉 +铝 +耦 +禹 +湛 +喽 +莼 +琅 +诸 +苎 +纂 +硅 +始 +嗨 +傥 +燃 +臂 +赅 +嘈 +呆 +贵 +屹 +壮 +肋 +亍 +蚀 +卅 +豹 +腆 +邬 +迭 +浊 +} +童 +螂 +捐 +圩 +勐 +触 +寞 +汊 +壤 +荫 +膺 +渌 +芳 +懿 +遴 +螈 +泰 +蓼 +蛤 +茜 +舅 +枫 +朔 +膝 +眙 +避 +梅 +判 +鹜 +璜 +牍 +缅 +垫 +藻 +黔 +侥 +惚 +懂 +踩 +腰 +腈 +札 +丞 +唾 +慈 +顿 +摹 +荻 +琬 +~ +斧 +沈 +滂 +胁 +胀 +幄 +莜 +Z +匀 +鄄 +掌 +绰 +茎 +焚 +赋 +萱 +谑 +汁 +铒 +瞎 +夺 +蜗 +野 +娆 +冀 +弯 +篁 +懵 +灞 +隽 +芡 +脘 +俐 +辩 +芯 +掺 +喏 +膈 +蝈 +觐 +悚 +踹 +蔗 +熠 +鼠 +呵 +抓 +橼 +峨 +畜 +缔 +禾 +崭 +弃 +熊 +摒 +凸 +拗 +穹 +蒙 +抒 +祛 +劝 +闫 +扳 +阵 +醌 +踪 +喵 +侣 +搬 +仅 +荧 +赎 +蝾 +琦 +买 +婧 +瞄 +寓 +皎 +冻 +赝 +箩 +莫 +瞰 +郊 +笫 +姝 +筒 +枪 +遣 +煸 +袋 +舆 +痱 +涛 +母 +〇 +启 +践 +耙 +绲 +盘 +遂 +昊 +搞 +槿 +诬 +纰 +泓 +惨 +檬 +亻 +越 +C +o +憩 +熵 +祷 +钒 +暧 +塔 +阗 +胰 +咄 +娶 +魔 +琶 +钞 +邻 +扬 +杉 +殴 +咽 +弓 +〆 +髻 +】 +吭 +揽 +霆 +拄 +殖 +脆 +彻 +岩 +芝 +勃 +辣 +剌 +钝 +嘎 +甄 +佘 +皖 +伦 +授 +徕 +憔 +挪 +皇 +庞 +稔 +芜 +踏 +溴 +兖 +卒 +擢 +饥 +鳞 +煲 +‰ +账 +颗 +叻 +斯 +捧 +鳍 +琮 +讹 +蛙 +纽 +谭 +酸 +兔 +莒 +睇 +伟 +觑 +羲 +嗜 +宜 +褐 +旎 +辛 +卦 +诘 +筋 +鎏 +溪 +挛 +熔 +阜 +晰 +鳅 +丢 +奚 +灸 +呱 +献 +陉 +黛 +鸪 +甾 +萨 +疮 +拯 +洲 +疹 +辑 +叙 +恻 +谒 +允 +柔 +烂 +氏 +逅 +漆 +拎 +惋 +扈 +湟 +纭 +啕 +掬 +擞 +哥 +忽 +涤 +鸵 +靡 +郗 +瓷 +扁 +廊 +怨 +雏 +钮 +敦 +E +懦 +憋 +汀 +拚 +啉 +腌 +岸 +f +痼 +瞅 +尊 +咀 +眩 +飙 +忌 +仝 +迦 +熬 +毫 +胯 +篑 +茄 +腺 +凄 +舛 +碴 +锵 +诧 +羯 +後 +漏 +汤 +宓 +仞 +蚁 +壶 +谰 +皑 +铄 +棰 +罔 +辅 +晶 +苦 +牟 +闽 +\ +烃 +饮 +聿 +丙 +蛳 +朱 +煤 +涔 +鳖 +犁 +罐 +荼 +砒 +淦 +妤 +黏 +戎 +孑 +婕 +瑾 +戢 +钵 +枣 +捋 +砥 +衩 +狙 +桠 +稣 +阎 +肃 +梏 +诫 +孪 +昶 +婊 +衫 +嗔 +侃 +塞 +蜃 +樵 +峒 +貌 +屿 +欺 +缫 +阐 +栖 +诟 +珞 +荭 +吝 +萍 +嗽 +恂 +啻 +蜴 +磬 +峋 +俸 +豫 +谎 +徊 +镍 +韬 +魇 +晴 +U +囟 +猜 +蛮 +坐 +囿 +伴 +亭 +肝 +佗 +蝠 +妃 +胞 +滩 +榴 +氖 +垩 +苋 +砣 +扪 +馏 +姓 +轩 +厉 +夥 +侈 +禀 +垒 +岑 +赏 +钛 +辐 +痔 +披 +纸 +碳 +“ +坞 +蠓 +挤 +荥 +沅 +悔 +铧 +帼 +蒌 +蝇 +a +p +y +n +g +哀 +浆 +瑶 +凿 +桶 +馈 +皮 +奴 +苜 +佤 +伶 +晗 +铱 +炬 +优 +弊 +氢 +恃 +甫 +攥 +端 +锌 +灰 +稹 +炝 +曙 +邋 +亥 +眶 +碾 +拉 +萝 +绔 +捷 +浍 +腋 +姑 +菖 +凌 +涞 +麽 +锢 +桨 +潢 +绎 +镰 +殆 +锑 +渝 +铬 +困 +绽 +觎 +匈 +糙 +暑 +裹 +鸟 +盔 +肽 +迷 +綦 +『 +亳 +佝 +俘 +钴 +觇 +骥 +仆 +疝 +跪 +婶 +郯 +瀹 +唉 +脖 +踞 +针 +晾 +忒 +扼 +瞩 +叛 +椒 +疟 +嗡 +邗 +肆 +跆 +玫 +忡 +捣 +咧 +唆 +艄 +蘑 +潦 +笛 +阚 +沸 +泻 +掊 +菽 +贫 +斥 +髂 +孢 +镂 +赂 +麝 +鸾 +屡 +衬 +苷 +恪 +叠 +希 +粤 +爻 +喝 +茫 +惬 +郸 +绻 +庸 +撅 +碟 +宄 +妹 +膛 +叮 +饵 +崛 +嗲 +椅 +冤 +搅 +咕 +敛 +尹 +垦 +闷 +蝉 +霎 +勰 +败 +蓑 +泸 +肤 +鹌 +幌 +焦 +浠 +鞍 +刁 +舰 +乙 +竿 +裔 +。 +茵 +函 +伊 +兄 +丨 +娜 +匍 +謇 +莪 +宥 +似 +蝽 +翳 +酪 +翠 +粑 +薇 +祢 +骏 +赠 +叫 +Q +噤 +噻 +竖 +芗 +莠 +潭 +俊 +羿 +耜 +O +郫 +趁 +嗪 +囚 +蹶 +芒 +洁 +笋 +鹑 +敲 +硝 +啶 +堡 +渲 +揩 +』 +携 +宿 +遒 +颍 +扭 +棱 +割 +萜 +蔸 +葵 +琴 +捂 +饰 +衙 +耿 +掠 +募 +岂 +窖 +涟 +蔺 +瘤 +柞 +瞪 +怜 +匹 +距 +楔 +炜 +哆 +秦 +缎 +幼 +茁 +绪 +痨 +恨 +楸 +娅 +瓦 +桩 +雪 +嬴 +伏 +榔 +妥 +铿 +拌 +眠 +雍 +缇 +‘ +卓 +搓 +哌 +觞 +噩 +屈 +哧 +髓 +咦 +巅 +娑 +侑 +淫 +膳 +祝 +勾 +姊 +莴 
+胄 +疃 +薛 +蜷 +胛 +巷 +芙 +芋 +熙 +闰 +勿 +窃 +狱 +剩 +钏 +幢 +陟 +铛 +慧 +靴 +耍 +k +浙 +浇 +飨 +惟 +绗 +祜 +澈 +啼 +咪 +磷 +摞 +诅 +郦 +抹 +跃 +壬 +吕 +肖 +琏 +颤 +尴 +剡 +抠 +凋 +赚 +泊 +津 +宕 +殷 +倔 +氲 +漫 +邺 +涎 +怠 +$ +垮 +荬 +遵 +俏 +叹 +噢 +饽 +蜘 +孙 +筵 +疼 +鞭 +羧 +牦 +箭 +潴 +c +眸 +祭 +髯 +啖 +坳 +愁 +芩 +驮 +倡 +巽 +穰 +沃 +胚 +怒 +凤 +槛 +剂 +趵 +嫁 +v +邢 +灯 +鄢 +桐 +睽 +檗 +锯 +槟 +婷 +嵋 +圻 +诗 +蕈 +颠 +遭 +痢 +芸 +怯 +馥 +竭 +锗 +徜 +恭 +遍 +籁 +剑 +嘱 +苡 +龄 +僧 +桑 +潸 +弘 +澶 +楹 +悲 +讫 +愤 +腥 +悸 +谍 +椹 +呢 +桓 +葭 +攫 +阀 +翰 +躲 +敖 +柑 +郎 +笨 +橇 +呃 +魁 +燎 +脓 +葩 +磋 +垛 +玺 +狮 +沓 +砜 +蕊 +锺 +罹 +蕉 +翱 +虐 +闾 +巫 +旦 +茱 +嬷 +枯 +鹏 +贡 +芹 +汛 +矫 +绁 +拣 +禺 +佃 +讣 +舫 +惯 +乳 +趋 +疲 +挽 +岚 +虾 +衾 +蠹 +蹂 +飓 +氦 +铖 +孩 +稞 +瑜 +壅 +掀 +勘 +妓 +畅 +髋 +W +庐 +牲 +蓿 +榕 +练 +垣 +唱 +邸 +菲 +昆 +婺 +穿 +绡 +麒 +蚱 +掂 +愚 +泷 +涪 +漳 +妩 +娉 +榄 +讷 +觅 +旧 +藤 +煮 +呛 +柳 +腓 +叭 +庵 +烷 +阡 +罂 +蜕 +擂 +猖 +咿 +媲 +脉 +【 +沏 +貅 +黠 +熏 +哲 +烁 +坦 +酵 +兜 +× +潇 +撒 +剽 +珩 +圹 +乾 +摸 +樟 +帽 +嗒 +襄 +魂 +轿 +憬 +锡 +〕 +喃 +皆 +咖 +隅 +脸 +残 +泮 +袂 +鹂 +珊 +囤 +捆 +咤 +误 +徨 +闹 +淙 +芊 +淋 +怆 +囗 +拨 +梳 +渤 +R +G +绨 +蚓 +婀 +幡 +狩 +麾 +谢 +唢 +裸 +旌 +伉 +纶 +裂 +驳 +砼 +咛 +澄 +樨 +蹈 +宙 +澍 +倍 +貔 +操 +勇 +蟠 +摈 +砧 +虬 +够 +缁 +悦 +藿 +撸 +艹 +摁 +淹 +豇 +虎 +榭 +ˉ +吱 +d +° +喧 +荀 +踱 +侮 +奋 +偕 +饷 +犍 +惮 +坑 +璎 +徘 +宛 +妆 +袈 +倩 +窦 +昂 +荏 +乖 +K +怅 +撰 +鳙 +牙 +袁 +酞 +X +痿 +琼 +闸 +雁 +趾 +荚 +虻 +涝 +《 +杏 +韭 +偈 +烤 +绫 +鞘 +卉 +症 +遢 +蓥 +诋 +杭 +荨 +匆 +竣 +簪 +辙 +敕 +虞 +丹 +缭 +咩 +黟 +m +淤 +瑕 +咂 +铉 +硼 +茨 +嶂 +痒 +畸 +敬 +涿 +粪 +窘 +熟 +叔 +嫔 +盾 +忱 +裘 +憾 +梵 +赡 +珙 +咯 +娘 +庙 +溯 +胺 +葱 +痪 +摊 +荷 +卞 +乒 +髦 +寐 +铭 +坩 +胗 +枷 +爆 +溟 +嚼 +羚 +砬 +轨 +惊 +挠 +罄 +竽 +菏 +氧 +浅 +楣 +盼 +枢 +炸 +阆 +杯 +谏 +噬 +淇 +渺 +俪 +秆 +墓 +泪 +跻 +砌 +痰 +垡 +渡 +耽 +釜 +讶 +鳎 +煞 +呗 +韶 +舶 +绷 +鹳 +缜 +旷 +铊 +皱 +龌 +檀 +霖 +奄 +槐 +艳 +蝶 +旋 +哝 +赶 +骞 +蚧 +腊 +盈 +丁 +` +蜚 +矸 +蝙 +睨 +嚓 +僻 +鬼 +醴 +夜 +彝 +磊 +笔 +拔 +栀 +糕 +厦 +邰 +纫 +逭 +纤 +眦 +膊 +馍 +躇 +烯 +蘼 +冬 +诤 +暄 +骶 +哑 +瘠 +」 +臊 +丕 +愈 +咱 +螺 +擅 +跋 +搏 +硪 +谄 +笠 +淡 +嘿 +骅 +谧 +鼎 +皋 +姚 +歼 +蠢 +驼 +耳 +胬 +挝 +涯 +狗 +蒽 +孓 +犷 +凉 +芦 +箴 +铤 +孤 +嘛 +坤 +V +茴 +朦 +挞 +尖 +橙 +诞 +搴 +碇 +洵 +浚 +帚 +蜍 +漯 +柘 +嚎 +讽 +芭 +荤 +咻 +祠 +秉 +跖 +埃 +吓 +糯 +眷 +馒 +惹 +娼 +鲑 +嫩 +讴 +轮 +瞥 +靶 +褚 +乏 +缤 +宋 +帧 +删 +驱 +碎 +扑 +俩 +俄 +偏 +涣 +竹 +噱 +皙 +佰 +渚 +唧 +斡 +# +镉 +刀 +崎 +筐 +佣 +夭 +贰 +肴 +峙 +哔 +艿 +匐 +牺 +镛 +缘 +仡 +嫡 +劣 +枸 +堀 +梨 +簿 +鸭 +蒸 +亦 +稽 +浴 +{ +衢 +束 +槲 +j +阁 +揍 +疥 +棋 +潋 +聪 +窜 +乓 +睛 +插 +冉 +阪 +苍 +搽 +「 +蟾 +螟 +幸 +仇 +樽 +撂 +慢 +跤 +幔 +俚 +淅 +覃 +觊 +溶 +妖 +帛 +侨 +曰 +妾 +泗 +· +: +瀘 +風 +Ë +( +) +∶ +紅 +紗 +瑭 +雲 +頭 +鶏 +財 +許 +• +¥ +樂 +焗 +麗 +— +; +滙 +東 +榮 +繪 +興 +… +門 +業 +π +楊 +國 +顧 +é +盤 +寳 +Λ +龍 +鳳 +島 +誌 +緣 +結 +銭 +萬 +勝 +祎 +璟 +優 +歡 +臨 +時 +購 += +★ +藍 +昇 +鐵 +觀 +勅 +農 +聲 +畫 +兿 +術 +發 +劉 +記 +專 +耑 +園 +書 +壴 +種 +Ο +● +褀 +號 +銀 +匯 +敟 +锘 +葉 +橪 +廣 +進 +蒄 +鑽 +阝 +祙 +貢 +鍋 +豊 +夬 +喆 +團 +閣 +開 +燁 +賓 +館 +酡 +沔 +順 ++ +硚 +劵 +饸 +陽 +車 +湓 +復 +萊 +氣 +軒 +華 +堃 +迮 +纟 +戶 +馬 +學 +裡 +電 +嶽 +獨 +マ +シ +サ +ジ +燘 +袪 +環 +❤ +臺 +灣 +専 +賣 +孖 +聖 +攝 +線 +▪ +α +傢 +俬 +夢 +達 +莊 +喬 +貝 +薩 +劍 +羅 +壓 +棛 +饦 +尃 +璈 +囍 +醫 +G +I +A +# +N +鷄 +髙 +嬰 +啓 +約 +隹 +潔 +賴 +藝 +~ +寶 +籣 +麺 +  +嶺 +√ +義 +網 +峩 +長 +∧ +魚 +機 +構 +② +鳯 +偉 +L +B +㙟 +畵 +鴿 +' +詩 +溝 +嚞 +屌 +藔 +佧 +玥 +蘭 +織 +1 +3 +9 +0 +7 +點 +砭 +鴨 +鋪 +銘 +廳 +弍 +‧ +創 +湯 +坶 +℃ +卩 +骝 +& +烜 +荘 +當 +潤 +扞 +係 +懷 +碶 +钅 +蚨 +讠 +☆ +叢 +爲 +埗 +涫 +塗 +→ +楽 +現 +鯨 +愛 +瑪 +鈺 +忄 +悶 +藥 +飾 +樓 +視 +孬 +ㆍ +燚 +苪 +師 +① +丼 +锽 +│ +韓 +標 +è +兒 +閏 +匋 +張 +漢 +Ü +髪 +會 +閑 +檔 +習 +裝 +の +峯 +菘 +輝 +И +雞 +釣 +億 +浐 +K +O +R +8 +H +E +P +T +W +D +S +C +M +F +姌 +饹 +» +晞 +廰 +ä +嵯 +鷹 +負 +飲 +絲 +冚 +楗 +澤 +綫 +區 +❋ +← +質 +靑 +揚 +③ +滬 +統 +産 +協 +﹑ +乸 +畐 +經 +運 +際 +洺 +岽 +為 +粵 +諾 +崋 +豐 +碁 +ɔ +V +2 +6 +齋 +誠 +訂 +´ +勑 +雙 +陳 +無 +í +泩 +媄 +夌 +刂 +i +c +t +o +r +a +嘢 +耄 +燴 +暃 +壽 +媽 +靈 +抻 +體 +唻 +É +冮 +甹 +鎮 +錦 +ʌ +蜛 +蠄 +尓 +駕 +戀 +飬 +逹 +倫 +貴 +極 +Я +Й +寬 +磚 +嶪 +郎 +職 +| +間 +n +d +剎 +伈 +課 +飛 +橋 +瘊 +№ +譜 +骓 +圗 +滘 +縣 +粿 +咅 +養 +濤 +彳 +® +% +Ⅱ +啰 +㴪 +見 +矞 +薬 +糁 +邨 +鲮 +顔 +罱 +З +選 +話 +贏 +氪 +俵 +競 +瑩 +繡 +枱 +β +綉 +á +獅 +爾 +™ +麵 +戋 +淩 +徳 +個 +劇 +場 +務 +簡 +寵 +h +實 +膠 +轱 +圖 +築 +嘣 +樹 +㸃 +營 +耵 +孫 +饃 +鄺 +飯 +麯 +遠 +輸 +坫 +孃 +乚 
+閃 +鏢 +㎡ +題 +廠 +關 +↑ +爺 +將 +軍 +連 +篦 +覌 +參 +箸 +- +窠 +棽 +寕 +夀 +爰 +歐 +呙 +閥 +頡 +熱 +雎 +垟 +裟 +凬 +勁 +帑 +馕 +夆 +疌 +枼 +馮 +貨 +蒤 +樸 +彧 +旸 +靜 +龢 +暢 +㐱 +鳥 +珺 +鏡 +灡 +爭 +堷 +廚 +Ó +騰 +診 +┅ +蘇 +褔 +凱 +頂 +豕 +亞 +帥 +嘬 +⊥ +仺 +桖 +複 +饣 +絡 +穂 +顏 +棟 +納 +▏ +濟 +親 +設 +計 +攵 +埌 +烺 +ò +頤 +燦 +蓮 +撻 +節 +講 +濱 +濃 +娽 +洳 +朿 +燈 +鈴 +護 +膚 +铔 +過 +補 +Z +U +5 +4 +坋 +闿 +䖝 +餘 +缐 +铞 +貿 +铪 +桼 +趙 +鍊 +[ +㐂 +垚 +菓 +揸 +捲 +鐘 +滏 +𣇉 +爍 +輪 +燜 +鴻 +鮮 +動 +鹞 +鷗 +丄 +慶 +鉌 +翥 +飮 +腸 +⇋ +漁 +覺 +來 +熘 +昴 +翏 +鲱 +圧 +鄉 +萭 +頔 +爐 +嫚 +г +貭 +類 +聯 +幛 +輕 +訓 +鑒 +夋 +锨 +芃 +珣 +䝉 +扙 +嵐 +銷 +處 +ㄱ +語 +誘 +苝 +歸 +儀 +燒 +楿 +內 +粢 +葒 +奧 +麥 +礻 +滿 +蠔 +穵 +瞭 +態 +鱬 +榞 +硂 +鄭 +黃 +煙 +祐 +奓 +逺 +* +瑄 +獲 +聞 +薦 +讀 +這 +樣 +決 +問 +啟 +們 +執 +説 +轉 +單 +隨 +唘 +帶 +倉 +庫 +還 +贈 +尙 +皺 +■ +餅 +產 +○ +∈ +報 +狀 +楓 +賠 +琯 +嗮 +禮 +` +傳 +> +≤ +嗞 +Φ +≥ +換 +咭 +∣ +↓ +曬 +ε +応 +寫 +″ +終 +様 +純 +費 +療 +聨 +凍 +壐 +郵 +ü +黒 +∫ +製 +塊 +調 +軽 +確 +撃 +級 +馴 +Ⅲ +涇 +繹 +數 +碼 +證 +狒 +処 +劑 +< +晧 +賀 +衆 +] +櫥 +兩 +陰 +絶 +對 +鯉 +憶 +◎ +p +e +Y +蕒 +煖 +頓 +測 +試 +鼽 +僑 +碩 +妝 +帯 +≈ +鐡 +舖 +權 +喫 +倆 +ˋ +該 +悅 +ā +俫 +. +f +s +b +m +k +g +u +j +貼 +淨 +濕 +針 +適 +備 +l +/ +給 +謢 +強 +觸 +衛 +與 +⊙ +$ +緯 +變 +⑴ +⑵ +⑶ +㎏ +殺 +∩ +幚 +─ +價 +▲ +離 +ú +ó +飄 +烏 +関 +閟 +﹝ +﹞ +邏 +輯 +鍵 +驗 +訣 +導 +歷 +屆 +層 +▼ +儱 +錄 +熳 +ē +艦 +吋 +錶 +辧 +飼 +顯 +④ +禦 +販 +気 +対 +枰 +閩 +紀 +幹 +瞓 +貊 +淚 +△ +眞 +墊 +Ω +獻 +褲 +縫 +緑 +亜 +鉅 +餠 +{ +} +◆ +蘆 +薈 +█ +◇ +溫 +彈 +晳 +粧 +犸 +穩 +訊 +崬 +凖 +熥 +П +舊 +條 +紋 +圍 +Ⅳ +筆 +尷 +難 +雜 +錯 +綁 +識 +頰 +鎖 +艶 +□ +殁 +殼 +⑧ +├ +▕ +鵬 +ǐ +ō +ǒ +糝 +綱 +▎ +μ +盜 +饅 +醬 +籤 +蓋 +釀 +鹽 +據 +à +ɡ +辦 +◥ +彐 +┌ +婦 +獸 +鲩 +伱 +ī +蒟 +蒻 +齊 +袆 +腦 +寧 +凈 +妳 +煥 +詢 +偽 +謹 +啫 +鯽 +騷 +鱸 +損 +傷 +鎻 +髮 +買 +冏 +儥 +両 +﹢ +∞ +載 +喰 +z +羙 +悵 +燙 +曉 +員 +組 +徹 +艷 +痠 +鋼 +鼙 +縮 +細 +嚒 +爯 +≠ +維 +" +鱻 +壇 +厍 +帰 +浥 +犇 +薡 +軎 +² +應 +醜 +刪 +緻 +鶴 +賜 +噁 +軌 +尨 +镔 +鷺 +槗 +彌 +葚 +濛 +請 +溇 +緹 +賢 +訪 +獴 +瑅 +資 +縤 +陣 +蕟 +栢 +韻 +祼 +恁 +伢 +謝 +劃 +涑 +總 +衖 +踺 +砋 +凉 +籃 +駿 +苼 +瘋 +昽 +紡 +驊 +腎 +﹗ +響 +杋 +剛 +嚴 +禪 +歓 +槍 +傘 +檸 +檫 +炣 +勢 +鏜 +鎢 +銑 +尐 +減 +奪 +惡 +θ +僮 +婭 +臘 +ū +ì +殻 +鉄 +∑ +蛲 +焼 +緖 +續 +紹 +懮 \ No newline at end of file diff --git a/iopaint/model/anytext/utils.py b/iopaint/model/anytext/utils.py new file mode 100644 index 0000000..c9f55b8 --- /dev/null +++ b/iopaint/model/anytext/utils.py @@ -0,0 +1,151 @@ +import os +import datetime +import cv2 +import numpy as np +from PIL import Image, ImageDraw + + +def save_images(img_list, folder): + if not os.path.exists(folder): + os.makedirs(folder) + now = datetime.datetime.now() + date_str = now.strftime("%Y-%m-%d") + folder_path = os.path.join(folder, date_str) + if not os.path.exists(folder_path): + os.makedirs(folder_path) + time_str = now.strftime("%H_%M_%S") + for idx, img in enumerate(img_list): + image_number = idx + 1 + filename = f"{time_str}_{image_number}.jpg" + save_path = os.path.join(folder_path, filename) + cv2.imwrite(save_path, img[..., ::-1]) + + +def check_channels(image): + channels = image.shape[2] if len(image.shape) == 3 else 1 + if channels == 1: + image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) + elif channels > 3: + image = image[:, :, :3] + return image + + +def resize_image(img, max_length=768): + height, width = img.shape[:2] + max_dimension = max(height, width) + + if max_dimension > max_length: + scale_factor = max_length / max_dimension + new_width = int(round(width * scale_factor)) + new_height = int(round(height * scale_factor)) + new_size = (new_width, new_height) + img = cv2.resize(img, new_size) + height, width = img.shape[:2] + img = cv2.resize(img, (width - (width % 64), height - (height % 64))) + return img + + +def insert_spaces(string, nSpace): + if nSpace == 0: + return string + new_string = "" + for char in string: + new_string += char + " " * nSpace + return new_string[:-nSpace] + + +def 
draw_glyph(font, text): + g_size = 50 + W, H = (512, 80) + new_font = font.font_variant(size=g_size) + img = Image.new(mode="1", size=(W, H), color=0) + draw = ImageDraw.Draw(img) + left, top, right, bottom = new_font.getbbox(text) + text_width = max(right - left, 5) + text_height = max(bottom - top, 5) + ratio = min(W * 0.9 / text_width, H * 0.9 / text_height) + new_font = font.font_variant(size=int(g_size * ratio)) + + text_width, text_height = new_font.getsize(text) + offset_x, offset_y = new_font.getoffset(text) + x = (img.width - text_width) // 2 + y = (img.height - text_height) // 2 - offset_y // 2 + draw.text((x, y), text, font=new_font, fill="white") + img = np.expand_dims(np.array(img), axis=2).astype(np.float64) + return img + + +def draw_glyph2( + font, text, polygon, vertAng=10, scale=1, width=512, height=512, add_space=True +): + enlarge_polygon = polygon * scale + rect = cv2.minAreaRect(enlarge_polygon) + box = cv2.boxPoints(rect) + box = np.int0(box) + w, h = rect[1] + angle = rect[2] + if angle < -45: + angle += 90 + angle = -angle + if w < h: + angle += 90 + + vert = False + if abs(angle) % 90 < vertAng or abs(90 - abs(angle) % 90) % 90 < vertAng: + _w = max(box[:, 0]) - min(box[:, 0]) + _h = max(box[:, 1]) - min(box[:, 1]) + if _h >= _w: + vert = True + angle = 0 + + img = np.zeros((height * scale, width * scale, 3), np.uint8) + img = Image.fromarray(img) + + # infer font size + image4ratio = Image.new("RGB", img.size, "white") + draw = ImageDraw.Draw(image4ratio) + _, _, _tw, _th = draw.textbbox(xy=(0, 0), text=text, font=font) + text_w = min(w, h) * (_tw / _th) + if text_w <= max(w, h): + # add space + if len(text) > 1 and not vert and add_space: + for i in range(1, 100): + text_space = insert_spaces(text, i) + _, _, _tw2, _th2 = draw.textbbox(xy=(0, 0), text=text_space, font=font) + if min(w, h) * (_tw2 / _th2) > max(w, h): + break + text = insert_spaces(text, i - 1) + font_size = min(w, h) * 0.80 + else: + shrink = 0.75 if vert else 0.85 + font_size = min(w, h) / (text_w / max(w, h)) * shrink + new_font = font.font_variant(size=int(font_size)) + + left, top, right, bottom = new_font.getbbox(text) + text_width = right - left + text_height = bottom - top + + layer = Image.new("RGBA", img.size, (0, 0, 0, 0)) + draw = ImageDraw.Draw(layer) + if not vert: + draw.text( + (rect[0][0] - text_width // 2, rect[0][1] - text_height // 2 - top), + text, + font=new_font, + fill=(255, 255, 255, 255), + ) + else: + x_s = min(box[:, 0]) + _w // 2 - text_height // 2 + y_s = min(box[:, 1]) + for c in text: + draw.text((x_s, y_s), c, font=new_font, fill=(255, 255, 255, 255)) + _, _t, _, _b = new_font.getbbox(c) + y_s += _b + + rotated_layer = layer.rotate(angle, expand=1, center=(rect[0][0], rect[0][1])) + + x_offset = int((img.width - rotated_layer.width) / 2) + y_offset = int((img.height - rotated_layer.height) / 2) + img.paste(rotated_layer, (x_offset, y_offset), rotated_layer) + img = np.expand_dims(np.array(img.convert("1")), axis=2).astype(np.float64) + return img diff --git a/lama_cleaner/model/base.py b/iopaint/model/base.py similarity index 61% rename from lama_cleaner/model/base.py rename to iopaint/model/base.py index 08c27b1..2f23d70 100644 --- a/lama_cleaner/model/base.py +++ b/iopaint/model/base.py @@ -6,13 +6,15 @@ import torch import numpy as np from loguru import logger -from lama_cleaner.helper import ( +from iopaint.helper import ( boxes_from_mask, resize_max_size, pad_img_to_modulo, switch_mps_device, ) -from lama_cleaner.schema import Config, HDStrategy +from 
iopaint.schema import InpaintRequest, HDStrategy, SDSampler +from .helper.g_diffuser_bot import expand_image +from .utils import get_scheduler class InpaintModel: @@ -20,6 +22,7 @@ class InpaintModel: min_size: Optional[int] = None pad_mod = 8 pad_to_square = False + is_erase_model = False def __init__(self, device, **kwargs): """ @@ -38,10 +41,10 @@ class InpaintModel: @staticmethod @abc.abstractmethod def is_downloaded() -> bool: - ... + return False @abc.abstractmethod - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """Input images and output images have same size images: [H, W, C] RGB masks: [H, W, 1] 255 为 masks 区域 @@ -49,7 +52,11 @@ class InpaintModel: """ ... - def _pad_forward(self, image, mask, config: Config): + @staticmethod + def download(): + ... + + def _pad_forward(self, image, mask, config: InpaintRequest): origin_height, origin_width = image.shape[:2] pad_image = pad_img_to_modulo( image, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size @@ -58,29 +65,35 @@ class InpaintModel: mask, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size ) - logger.info(f"final forward pad size: {pad_image.shape}") + # logger.info(f"final forward pad size: {pad_image.shape}") + + image, mask = self.forward_pre_process(image, mask, config) result = self.forward(pad_image, pad_mask, config) result = result[0:origin_height, 0:origin_width, :] result, image, mask = self.forward_post_process(result, image, mask, config) - mask = mask[:, :, np.newaxis] - result = result * (mask / 255) + image[:, :, ::-1] * (1 - (mask / 255)) + if config.sd_keep_unmasked_area: + mask = mask[:, :, np.newaxis] + result = result * (mask / 255) + image[:, :, ::-1] * (1 - (mask / 255)) return result + def forward_pre_process(self, image, mask, config): + return image, mask + def forward_post_process(self, result, image, mask, config): return result, image, mask @torch.no_grad() - def __call__(self, image, mask, config: Config): + def __call__(self, image, mask, config: InpaintRequest): """ images: [H, W, C] RGB, not normalized masks: [H, W] return: BGR IMAGE """ inpaint_result = None - logger.info(f"hd_strategy: {config.hd_strategy}") + # logger.info(f"hd_strategy: {config.hd_strategy}") if config.hd_strategy == HDStrategy.CROP: if max(image.shape) > config.hd_strategy_crop_trigger_size: logger.info(f"Run crop strategy") @@ -128,7 +141,7 @@ class InpaintModel: return inpaint_result - def _crop_box(self, image, mask, box, config: Config): + def _crop_box(self, image, mask, box, config: InpaintRequest): """ Args: @@ -176,7 +189,7 @@ class InpaintModel: crop_img = image[t:b, l:r, :] crop_mask = mask[t:b, l:r] - logger.info(f"box size: ({box_h},{box_w}) crop size: {crop_img.shape}") + # logger.info(f"box size: ({box_h},{box_w}) crop size: {crop_img.shape}") return crop_img, crop_mask, [l, t, r, b] @@ -198,6 +211,9 @@ class InpaintModel: def _match_histograms(self, source, reference, mask): transformed_channels = [] + if len(mask.shape) == 3: + mask = mask[:, :, -1] + for channel in range(source.shape[-1]): source_channel = source[:, :, channel] reference_channel = reference[:, :, channel] @@ -220,7 +236,7 @@ class InpaintModel: return result - def _apply_cropper(self, image, mask, config: Config): + def _apply_cropper(self, image, mask, config: InpaintRequest): img_h, img_w = image.shape[:2] l, t, w, h = ( config.croper_x, @@ -240,7 +256,7 @@ class InpaintModel: crop_mask = mask[t:b, l:r] return crop_img, crop_mask, (l, t, r, b) - 
def _run_box(self, image, mask, box, config: Config): + def _run_box(self, image, mask, box, config: InpaintRequest): """ Args: @@ -257,8 +273,13 @@ class InpaintModel: class DiffusionInpaintModel(InpaintModel): + def __init__(self, device, **kwargs): + self.model_info = kwargs["model_info"] + self.model_id_or_path = self.model_info.path + super().__init__(device, **kwargs) + @torch.no_grad() - def __call__(self, image, mask, config: Config): + def __call__(self, image, mask, config: InpaintRequest): """ images: [H, W, C] RGB, not normalized masks: [H, W] @@ -270,12 +291,80 @@ class DiffusionInpaintModel(InpaintModel): crop_image = self._scaled_pad_forward(crop_img, crop_mask, config) inpaint_result = image[:, :, ::-1] inpaint_result[t:b, l:r, :] = crop_image + elif config.use_extender: + inpaint_result = self._do_outpainting(image, config) else: inpaint_result = self._scaled_pad_forward(image, mask, config) return inpaint_result - def _scaled_pad_forward(self, image, mask, config: Config): + def _do_outpainting(self, image, config: InpaintRequest): + # cropper 和 image 在同一个坐标系下,croper_x/y 可能为负数 + # 从 image 中 crop 出 outpainting 区域 + image_h, image_w = image.shape[:2] + cropper_l = config.extender_x + cropper_t = config.extender_y + cropper_r = config.extender_x + config.extender_width + cropper_b = config.extender_y + config.extender_height + image_l = 0 + image_t = 0 + image_r = image_w + image_b = image_h + + # 类似求 IOU + l = max(cropper_l, image_l) + t = max(cropper_t, image_t) + r = min(cropper_r, image_r) + b = min(cropper_b, image_b) + + assert ( + 0 <= l < r and 0 <= t < b + ), f"cropper and image not overlap, {l},{t},{r},{b}" + + cropped_image = image[t:b, l:r, :] + padding_l = max(0, image_l - cropper_l) + padding_t = max(0, image_t - cropper_t) + padding_r = max(0, cropper_r - image_r) + padding_b = max(0, cropper_b - image_b) + + expanded_image, mask_image = expand_image( + cropped_image, + left=padding_l, + top=padding_t, + right=padding_r, + bottom=padding_b, + softness=config.sd_outpainting_softness, + space=config.sd_outpainting_space, + ) + + # 最终扩大了的 image, BGR + expanded_cropped_result_image = self._scaled_pad_forward( + expanded_image, mask_image, config + ) + + # RGB -> BGR + outpainting_image = cv2.copyMakeBorder( + image, + left=padding_l, + top=padding_t, + right=padding_r, + bottom=padding_b, + borderType=cv2.BORDER_CONSTANT, + value=0, + )[:, :, ::-1] + + # 把 cropped_result_image 贴到 outpainting_image 上,这一步不需要 blend + paste_t = 0 if config.extender_y < 0 else config.extender_y + paste_l = 0 if config.extender_x < 0 else config.extender_x + + outpainting_image[ + paste_t : paste_t + expanded_cropped_result_image.shape[0], + paste_l : paste_l + expanded_cropped_result_image.shape[1], + :, + ] = expanded_cropped_result_image + return outpainting_image + + def _scaled_pad_forward(self, image, mask, config: InpaintRequest): longer_side_length = int(config.sd_scale * max(image.shape[:2])) origin_size = image.shape[:2] downsize_image = resize_max_size(image, size_limit=longer_side_length) @@ -291,8 +380,39 @@ class DiffusionInpaintModel(InpaintModel): (origin_size[1], origin_size[0]), interpolation=cv2.INTER_CUBIC, ) - original_pixel_indices = mask < 127 - inpaint_result[original_pixel_indices] = image[:, :, ::-1][ - original_pixel_indices - ] + + # blend result, copy from g_diffuser_bot + # mask_rgb = 1.0 - np_img_grey_to_rgb(mask / 255.0) + # inpaint_result = np.clip( + # inpaint_result * (1.0 - mask_rgb) + image * mask_rgb, 0.0, 255.0 + # ) + # original_pixel_indices = 
mask < 127 + # inpaint_result[original_pixel_indices] = image[:, :, ::-1][ + # original_pixel_indices + # ] return inpaint_result + + def set_scheduler(self, config: InpaintRequest): + scheduler_config = self.model.scheduler.config + sd_sampler = config.sd_sampler + if config.sd_lcm_lora: + sd_sampler = SDSampler.lcm + logger.info(f"LCM Lora enabled, use {sd_sampler} sampler") + scheduler = get_scheduler(sd_sampler, scheduler_config) + self.model.scheduler = scheduler + + def forward_pre_process(self, image, mask, config): + if config.sd_mask_blur != 0: + k = 2 * config.sd_mask_blur + 1 + mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis] + + return image, mask + + def forward_post_process(self, result, image, mask, config): + if config.sd_match_histograms: + result = self._match_histograms(result, image[:, :, ::-1], mask) + + if config.sd_mask_blur != 0: + k = 2 * config.sd_mask_blur + 1 + mask = cv2.GaussianBlur(mask, (k, k), 0) + return result, image, mask diff --git a/iopaint/model/controlnet.py b/iopaint/model/controlnet.py new file mode 100644 index 0000000..c738b13 --- /dev/null +++ b/iopaint/model/controlnet.py @@ -0,0 +1,186 @@ +import PIL.Image +import cv2 +import torch +from diffusers import ControlNetModel +from loguru import logger +from iopaint.schema import InpaintRequest, ModelType + +from .base import DiffusionInpaintModel +from .helper.controlnet_preprocess import ( + make_canny_control_image, + make_openpose_control_image, + make_depth_control_image, + make_inpaint_control_image, +) +from .helper.cpu_text_encoder import CPUTextEncoderWrapper +from .original_sd_configs import get_config_files +from .utils import ( + get_scheduler, + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) + + +class ControlNet(DiffusionInpaintModel): + name = "controlnet" + pad_mod = 8 + min_size = 512 + + @property + def lcm_lora_id(self): + if self.model_info.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SD_INPAINT, + ]: + return "latent-consistency/lcm-lora-sdv1-5" + if self.model_info.model_type in [ + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SDXL_INPAINT, + ]: + return "latent-consistency/lcm-lora-sdxl" + raise NotImplementedError(f"Unsupported controlnet lcm model {self.model_info}") + + def init_model(self, device: torch.device, **kwargs): + model_info = kwargs["model_info"] + controlnet_method = kwargs["controlnet_method"] + + self.model_info = model_info + self.controlnet_method = controlnet_method + + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + self.local_files_only = model_kwargs["local_files_only"] + + disable_nsfw_checker = kwargs["disable_nsfw"] or kwargs.get( + "cpu_offload", False + ) + if disable_nsfw_checker: + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + self.torch_dtype = torch_dtype + + if model_info.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SD_INPAINT, + ]: + from diffusers import ( + StableDiffusionControlNetInpaintPipeline as PipeClass, + ) + elif model_info.model_type in [ + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SDXL_INPAINT, + ]: + from diffusers import ( + StableDiffusionXLControlNetInpaintPipeline as PipeClass, + ) + + controlnet = ControlNetModel.from_pretrained( + 
pretrained_model_name_or_path=controlnet_method, + resume_download=True, + local_files_only=model_kwargs["local_files_only"], + ) + if model_info.is_single_file_diffusers: + if self.model_info.model_type == ModelType.DIFFUSERS_SD: + model_kwargs["num_in_channels"] = 4 + else: + model_kwargs["num_in_channels"] = 9 + + self.model = PipeClass.from_single_file( + model_info.path, + controlnet=controlnet, + load_safety_checker=not disable_nsfw_checker, + torch_dtype=torch_dtype, + config_files=get_config_files(), + **model_kwargs, + ) + else: + self.model = handle_from_pretrained_exceptions( + PipeClass.from_pretrained, + pretrained_model_name_or_path=model_info.path, + controlnet=controlnet, + variant="fp16", + dtype=torch_dtype, + **model_kwargs, + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def switch_controlnet_method(self, new_method: str): + self.controlnet_method = new_method + controlnet = ControlNetModel.from_pretrained( + new_method, resume_download=True, local_files_only=self.local_files_only + ).to(self.model.device) + self.model.controlnet = controlnet + + def _get_control_image(self, image, mask): + if "canny" in self.controlnet_method: + control_image = make_canny_control_image(image) + elif "openpose" in self.controlnet_method: + control_image = make_openpose_control_image(image) + elif "depth" in self.controlnet_method: + control_image = make_depth_control_image(image) + elif "inpaint" in self.controlnet_method: + control_image = make_inpaint_control_image(image, mask) + else: + raise NotImplementedError(f"{self.controlnet_method} not implemented") + return control_image + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + scheduler_config = self.model.scheduler.config + scheduler = get_scheduler(config.sd_sampler, scheduler_config) + self.model.scheduler = scheduler + + img_h, img_w = image.shape[:2] + control_image = self._get_control_image(image, mask) + mask_image = PIL.Image.fromarray(mask[:, :, -1], mode="L") + image = PIL.Image.fromarray(image) + + output = self.model( + image=image, + mask_image=mask_image, + control_image=control_image, + prompt=config.prompt, + negative_prompt=config.negative_prompt, + num_inference_steps=config.sd_steps, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + controlnet_conditioning_scale=config.controlnet_conditioning_scale, + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/lama_cleaner/model/ddim_sampler.py b/iopaint/model/ddim_sampler.py similarity index 98% rename from lama_cleaner/model/ddim_sampler.py rename to iopaint/model/ddim_sampler.py index d1e4400..a3f44fd 100644 --- a/lama_cleaner/model/ddim_sampler.py +++ b/iopaint/model/ddim_sampler.py @@ -2,7 +2,7 @@ import torch import numpy as np from tqdm import tqdm 
-from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like +from .utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like from loguru import logger diff --git a/lama_cleaner/model/fcf.py b/iopaint/model/fcf.py similarity index 99% rename from lama_cleaner/model/fcf.py rename to iopaint/model/fcf.py index 07292c6..a6f2d42 100644 --- a/lama_cleaner/model/fcf.py +++ b/iopaint/model/fcf.py @@ -6,20 +6,21 @@ import torch import numpy as np import torch.fft as fft -from lama_cleaner.schema import Config +from iopaint.schema import InpaintRequest -from lama_cleaner.helper import ( +from iopaint.helper import ( load_model, get_cache_path_by_url, norm_img, boxes_from_mask, resize_max_size, + download_model, ) -from lama_cleaner.model.base import InpaintModel +from .base import InpaintModel from torch import conv2d, nn import torch.nn.functional as F -from lama_cleaner.model.utils import ( +from .utils import ( setup_filter, _parse_scaling, _parse_padding, @@ -870,7 +871,6 @@ class SpectralTransform(nn.Module): ) def forward(self, x): - x = self.downsample(x) x = self.conv1(x) output = self.fu(x) @@ -1437,7 +1437,6 @@ class SynthesisNetwork(torch.nn.Module): setattr(self, f"b{res}", block) def forward(self, x_global, mask, feats, ws, fname=None, **block_kwargs): - img = None x, img = self.foreword(x_global, ws, feats, img) @@ -1627,6 +1626,7 @@ class FcF(InpaintModel): min_size = 512 pad_mod = 512 pad_to_square = True + is_erase_model = True def init_model(self, device, **kwargs): seed = 0 @@ -1656,12 +1656,16 @@ class FcF(InpaintModel): self.model = load_model(G, FCF_MODEL_URL, device, FCF_MODEL_MD5) self.label = torch.zeros([1, self.model.c_dim], device=device) + @staticmethod + def download(): + download_model(FCF_MODEL_URL, FCF_MODEL_MD5) + @staticmethod def is_downloaded() -> bool: return os.path.exists(get_cache_path_by_url(FCF_MODEL_URL)) @torch.no_grad() - def __call__(self, image, mask, config: Config): + def __call__(self, image, mask, config: InpaintRequest): """ images: [H, W, C] RGB, not normalized masks: [H, W] @@ -1694,14 +1698,14 @@ class FcF(InpaintModel): crop_result.append((inpaint_result, crop_box)) - inpaint_result = image[:, :, ::-1] + inpaint_result = image[:, :, ::-1].copy() for crop_image, crop_box in crop_result: x1, y1, x2, y2 = crop_box inpaint_result[y1:y2, x1:x2, :] = crop_image return inpaint_result - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """Input images and output images have same size images: [H, W, C] RGB masks: [H, W] mask area == 255 diff --git a/iopaint/model/helper/__init__.py b/iopaint/model/helper/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/helper/controlnet_preprocess.py b/iopaint/model/helper/controlnet_preprocess.py new file mode 100644 index 0000000..75c409f --- /dev/null +++ b/iopaint/model/helper/controlnet_preprocess.py @@ -0,0 +1,68 @@ +import torch +import PIL +import cv2 +from PIL import Image +import numpy as np + +from iopaint.helper import pad_img_to_modulo + + +def make_canny_control_image(image: np.ndarray) -> Image: + canny_image = cv2.Canny(image, 100, 200) + canny_image = canny_image[:, :, None] + canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2) + canny_image = PIL.Image.fromarray(canny_image) + control_image = canny_image + return control_image + + +def make_openpose_control_image(image: np.ndarray) -> Image: + from controlnet_aux import 
OpenposeDetector + + processor = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") + control_image = processor(image, hand_and_face=True) + return control_image + + +def resize_image(input_image, resolution): + H, W, C = input_image.shape + H = float(H) + W = float(W) + k = float(resolution) / min(H, W) + H *= k + W *= k + H = int(np.round(H / 64.0)) * 64 + W = int(np.round(W / 64.0)) * 64 + img = cv2.resize( + input_image, + (W, H), + interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA, + ) + return img + + +def make_depth_control_image(image: np.ndarray) -> Image: + from controlnet_aux import MidasDetector + + midas = MidasDetector.from_pretrained("lllyasviel/Annotators") + + origin_height, origin_width = image.shape[:2] + pad_image = pad_img_to_modulo(image, mod=64, square=False, min_size=512) + depth_image = midas(pad_image) + depth_image = depth_image[0:origin_height, 0:origin_width] + depth_image = depth_image[:, :, None] + depth_image = np.concatenate([depth_image, depth_image, depth_image], axis=2) + control_image = PIL.Image.fromarray(depth_image) + return control_image + + +def make_inpaint_control_image(image: np.ndarray, mask: np.ndarray) -> torch.Tensor: + """ + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + """ + image = image.astype(np.float32) / 255.0 + image[mask[:, :, -1] > 128] = -1.0 # set as masked pixel + image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return image diff --git a/iopaint/model/helper/cpu_text_encoder.py b/iopaint/model/helper/cpu_text_encoder.py new file mode 100644 index 0000000..889bc97 --- /dev/null +++ b/iopaint/model/helper/cpu_text_encoder.py @@ -0,0 +1,32 @@ +import torch +from transformers import PreTrainedModel + +from ..utils import torch_gc + + +class CPUTextEncoderWrapper(PreTrainedModel): + def __init__(self, text_encoder, torch_dtype): + super().__init__(text_encoder.config) + self.config = text_encoder.config + # cpu not support float16 + self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True) + self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True) + self.torch_dtype = torch_dtype + del text_encoder + torch_gc() + + def __call__(self, x, **kwargs): + input_device = x.device + original_output = self.text_encoder(x.to(self.text_encoder.device), **kwargs) + for k, v in original_output.items(): + if isinstance(v, tuple): + original_output[k] = [ + v[i].to(input_device).to(self.torch_dtype) for i in range(len(v)) + ] + else: + original_output[k] = v.to(input_device).to(self.torch_dtype) + return original_output + + @property + def dtype(self): + return self.torch_dtype diff --git a/iopaint/model/helper/g_diffuser_bot.py b/iopaint/model/helper/g_diffuser_bot.py new file mode 100644 index 0000000..a4147af --- /dev/null +++ b/iopaint/model/helper/g_diffuser_bot.py @@ -0,0 +1,167 @@ +# code copy from: https://github.com/parlance-zz/g-diffuser-bot +import cv2 +import numpy as np + + +def np_img_grey_to_rgb(data): + if data.ndim == 3: + return data + return np.expand_dims(data, 2) * np.ones((1, 1, 3)) + + +def convolve(data1, data2): # fast convolution with fft + if data1.ndim != data2.ndim: # promote to rgb if mismatch + if data1.ndim < 3: + data1 = np_img_grey_to_rgb(data1) + if data2.ndim < 3: + data2 = np_img_grey_to_rgb(data2) + return ifft2(fft2(data1) * fft2(data2)) + + +def fft2(data): + if data.ndim > 2: # multiple channels + out_fft = np.zeros( + (data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128 + ) + 
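        # The transform is applied channel by channel; paired with ifft2() below,
        # convolve() above multiplies the spectra element-wise, i.e. performs a
        # circular convolution (up to normalisation) via the convolution theorem,
        # which image_blur() relies on to soften the outpainting mask.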
for c in range(data.shape[2]): + c_data = data[:, :, c] + out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho") + out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c]) + else: # single channel + out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128) + out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho") + out_fft[:, :] = np.fft.ifftshift(out_fft[:, :]) + + return out_fft + + +def ifft2(data): + if data.ndim > 2: # multiple channels + out_ifft = np.zeros( + (data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128 + ) + for c in range(data.shape[2]): + c_data = data[:, :, c] + out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho") + out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c]) + else: # single channel + out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128) + out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho") + out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :]) + + return out_ifft + + +def get_gradient_kernel(width, height, std=3.14, mode="linear"): + window_scale_x = float( + width / min(width, height) + ) # for non-square aspect ratios we still want a circular kernel + window_scale_y = float(height / min(width, height)) + if mode == "gaussian": + x = (np.arange(width) / width * 2.0 - 1.0) * window_scale_x + kx = np.exp(-x * x * std) + if window_scale_x != window_scale_y: + y = (np.arange(height) / height * 2.0 - 1.0) * window_scale_y + ky = np.exp(-y * y * std) + else: + y = x + ky = kx + return np.outer(kx, ky) + elif mode == "linear": + x = (np.arange(width) / width * 2.0 - 1.0) * window_scale_x + if window_scale_x != window_scale_y: + y = (np.arange(height) / height * 2.0 - 1.0) * window_scale_y + else: + y = x + return np.clip(1.0 - np.sqrt(np.add.outer(x * x, y * y)) * std / 3.14, 0.0, 1.0) + else: + raise Exception("Error: Unknown mode in get_gradient_kernel: {0}".format(mode)) + + +def image_blur(data, std=3.14, mode="linear"): + width = data.shape[0] + height = data.shape[1] + kernel = get_gradient_kernel(width, height, std, mode=mode) + return np.real(convolve(data, kernel / np.sqrt(np.sum(kernel * kernel)))) + + +def soften_mask(mask_img, softness, space): + if softness == 0: + return mask_img + softness = min(softness, 1.0) + space = np.clip(space, 0.0, 1.0) + original_max_opacity = np.max(mask_img) + out_mask = mask_img <= 0.0 + blurred_mask = image_blur(mask_img, 3.5 / softness, mode="linear") + blurred_mask = np.maximum(blurred_mask - np.max(blurred_mask[out_mask]), 0.0) + mask_img *= blurred_mask # preserve partial opacity in original input mask + mask_img /= np.max(mask_img) # renormalize + mask_img = np.clip(mask_img - space, 0.0, 1.0) # make space + mask_img /= np.max(mask_img) # and renormalize again + mask_img *= original_max_opacity # restore original max opacity + return mask_img + + +def expand_image( + cv2_img, top: int, right: int, bottom: int, left: int, softness: float, space: float +): + assert cv2_img.shape[2] == 3 + origin_h, origin_w = cv2_img.shape[:2] + new_width = cv2_img.shape[1] + left + right + new_height = cv2_img.shape[0] + top + bottom + + # TODO: which is better? 
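+    # Note: cv2.BORDER_REPLICATE (used below) pre-fills the expanded border region by
+    # repeating the edge pixels, while the commented-out alternative pre-fills it with
+    # random noise; either way it is only an initial guess that the outpainting model
+    # later repaints, guided by the mask built just after.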
+ # new_img = np.random.randint(0, 255, (new_height, new_width, 3), np.uint8) + new_img = cv2.copyMakeBorder( + cv2_img, top, bottom, left, right, cv2.BORDER_REPLICATE + ) + mask_img = np.zeros((new_height, new_width), np.uint8) + mask_img[top : top + cv2_img.shape[0], left : left + cv2_img.shape[1]] = 255 + + if softness > 0.0: + mask_img = soften_mask(mask_img / 255.0, softness / 100.0, space / 100.0) + mask_img = (np.clip(mask_img, 0.0, 1.0) * 255.0).astype(np.uint8) + + mask_image = 255.0 - mask_img # extract mask from alpha channel and invert + rgb_init_image = ( + 0.0 + new_img[:, :, 0:3] + ) # strip mask from init_img leaving only rgb channels + + hard_mask = np.zeros_like(cv2_img[:, :, 0]) + if top != 0: + hard_mask[0 : origin_h // 2, :] = 255 + if bottom != 0: + hard_mask[origin_h // 2 :, :] = 255 + if left != 0: + hard_mask[:, 0 : origin_w // 2] = 255 + if right != 0: + hard_mask[:, origin_w // 2 :] = 255 + + hard_mask = cv2.copyMakeBorder( + hard_mask, top, bottom, left, right, cv2.BORDER_DEFAULT, value=255 + ) + mask_image = np.where(hard_mask > 0, mask_image, 0) + return rgb_init_image.astype(np.uint8), mask_image.astype(np.uint8) + + +if __name__ == "__main__": + from pathlib import Path + + current_dir = Path(__file__).parent.absolute().resolve() + image_path = current_dir.parent / "tests" / "bunny.jpeg" + init_image = cv2.imread(str(image_path)) + init_image, mask_image = expand_image( + init_image, + top=100, + right=100, + bottom=100, + left=100, + softness=20, + space=20, + ) + print(mask_image.dtype, mask_image.min(), mask_image.max()) + print(init_image.dtype, init_image.min(), init_image.max()) + mask_image = mask_image.astype(np.uint8) + init_image = init_image.astype(np.uint8) + cv2.imwrite("expanded_image.png", init_image) + cv2.imwrite("expanded_mask.png", mask_image) diff --git a/iopaint/model/instruct_pix2pix.py b/iopaint/model/instruct_pix2pix.py new file mode 100644 index 0000000..fc8cd26 --- /dev/null +++ b/iopaint/model/instruct_pix2pix.py @@ -0,0 +1,64 @@ +import PIL.Image +import cv2 +import torch +from loguru import logger + +from iopaint.const import INSTRUCT_PIX2PIX_NAME +from .base import DiffusionInpaintModel +from iopaint.schema import InpaintRequest +from .utils import get_torch_dtype, enable_low_mem, is_local_files_only + + +class InstructPix2Pix(DiffusionInpaintModel): + name = INSTRUCT_PIX2PIX_NAME + pad_mod = 8 + min_size = 512 + + def init_model(self, device: torch.device, **kwargs): + from diffusers import StableDiffusionInstructPix2PixPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + model_kwargs = {"local_files_only": is_local_files_only(**kwargs)} + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained( + self.name, variant="fp16", torch_dtype=torch_dtype, **model_kwargs + ) + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + edit = 
pipe(prompt, image=image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7).images[0]
+        """
+        output = self.model(
+            image=PIL.Image.fromarray(image),
+            prompt=config.prompt,
+            negative_prompt=config.negative_prompt,
+            num_inference_steps=config.sd_steps,
+            image_guidance_scale=config.p2p_image_guidance_scale,
+            guidance_scale=config.sd_guidance_scale,
+            output_type="np",
+            generator=torch.manual_seed(config.sd_seed),
+        ).images[0]
+
+        output = (output * 255).round().astype("uint8")
+        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
+        return output
diff --git a/iopaint/model/kandinsky.py b/iopaint/model/kandinsky.py
new file mode 100644
index 0000000..1a0bf1c
--- /dev/null
+++ b/iopaint/model/kandinsky.py
@@ -0,0 +1,65 @@
+import PIL.Image
+import cv2
+import numpy as np
+import torch
+
+from iopaint.const import KANDINSKY22_NAME
+from .base import DiffusionInpaintModel
+from iopaint.schema import InpaintRequest
+from .utils import get_torch_dtype, enable_low_mem, is_local_files_only
+
+
+class Kandinsky(DiffusionInpaintModel):
+    pad_mod = 64
+    min_size = 512
+
+    def init_model(self, device: torch.device, **kwargs):
+        from diffusers import AutoPipelineForInpainting
+
+        use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
+
+        model_kwargs = {
+            "torch_dtype": torch_dtype,
+            "local_files_only": is_local_files_only(**kwargs),
+        }
+        self.model = AutoPipelineForInpainting.from_pretrained(
+            self.name, **model_kwargs
+        ).to(device)
+        enable_low_mem(self.model, kwargs.get("low_mem", False))
+
+        self.callback = kwargs.pop("callback", None)
+
+    def forward(self, image, mask, config: InpaintRequest):
+        """Input image and output image have same size
+        image: [H, W, C] RGB
+        mask: [H, W, 1] 255 means area to repaint
+        return: BGR IMAGE
+        """
+        self.set_scheduler(config)

+        generator = torch.manual_seed(config.sd_seed)
+        mask = mask.astype(np.float32) / 255
+        img_h, img_w = image.shape[:2]
+
+        # Kandinsky does not have a strength parameter
+        output = self.model(
+            prompt=config.prompt,
+            negative_prompt=config.negative_prompt,
+            image=PIL.Image.fromarray(image),
+            mask_image=mask[:, :, 0],
+            height=img_h,
+            width=img_w,
+            num_inference_steps=config.sd_steps,
+            guidance_scale=config.sd_guidance_scale,
+            output_type="np",
+            callback_on_step_end=self.callback,
+            generator=generator,
+        ).images[0]
+
+        output = (output * 255).round().astype("uint8")
+        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
+        return output
+
+
+class Kandinsky22(Kandinsky):
+    name = KANDINSKY22_NAME
diff --git a/lama_cleaner/model/lama.py b/iopaint/model/lama.py
similarity index 81%
rename from lama_cleaner/model/lama.py
rename to iopaint/model/lama.py
index bdcdf0d..7aba242 100644
--- a/lama_cleaner/model/lama.py
+++ b/iopaint/model/lama.py
@@ -4,13 +4,14 @@ import cv2
 import numpy as np
 import torch

-from lama_cleaner.helper import (
+from iopaint.helper import (
     norm_img,
     get_cache_path_by_url,
     load_jit_model,
+    download_model,
 )
-from lama_cleaner.model.base import InpaintModel
-from lama_cleaner.schema import Config
+from iopaint.schema import InpaintRequest
+from .base import InpaintModel

 LAMA_MODEL_URL = os.environ.get(
     "LAMA_MODEL_URL",
@@ -22,6 +23,11 @@ LAMA_MODEL_MD5 = os.environ.get("LAMA_MODEL_MD5", "e3aa4aaa15225a33ec84f9f4bc47e
 class LaMa(InpaintModel):
     name = "lama"
     pad_mod = 8
+    is_erase_model = True
+
+    @staticmethod
+    def download():
+        download_model(LAMA_MODEL_URL, LAMA_MODEL_MD5)

     def init_model(self, device, **kwargs):
         self.model = load_jit_model(LAMA_MODEL_URL, device,
LAMA_MODEL_MD5).eval() @@ -30,7 +36,7 @@ class LaMa(InpaintModel): def is_downloaded() -> bool: return os.path.exists(get_cache_path_by_url(LAMA_MODEL_URL)) - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """Input image and output image have same size image: [H, W, C] RGB mask: [H, W] diff --git a/lama_cleaner/model/ldm.py b/iopaint/model/ldm.py similarity index 95% rename from lama_cleaner/model/ldm.py rename to iopaint/model/ldm.py index a5b6d12..19e51a3 100644 --- a/lama_cleaner/model/ldm.py +++ b/iopaint/model/ldm.py @@ -4,20 +4,20 @@ import numpy as np import torch from loguru import logger -from lama_cleaner.model.base import InpaintModel -from lama_cleaner.model.ddim_sampler import DDIMSampler -from lama_cleaner.model.plms_sampler import PLMSSampler -from lama_cleaner.schema import Config, LDMSampler +from .base import InpaintModel +from .ddim_sampler import DDIMSampler +from .plms_sampler import PLMSSampler +from iopaint.schema import InpaintRequest, LDMSampler torch.manual_seed(42) import torch.nn as nn -from lama_cleaner.helper import ( +from iopaint.helper import ( download_model, norm_img, get_cache_path_by_url, load_jit_model, ) -from lama_cleaner.model.utils import ( +from .utils import ( make_beta_schedule, timestep_embedding, ) @@ -237,6 +237,7 @@ class LatentDiffusion(DDPM): class LDM(InpaintModel): name = "ldm" pad_mod = 32 + is_erase_model = True def __init__(self, device, fp16: bool = True, **kwargs): self.fp16 = fp16 @@ -260,6 +261,12 @@ class LDM(InpaintModel): self.model = LatentDiffusion(self.diffusion_model, device) + @staticmethod + def download(): + download_model(LDM_DIFFUSION_MODEL_URL, LDM_DIFFUSION_MODEL_MD5) + download_model(LDM_DECODE_MODEL_URL, LDM_DECODE_MODEL_MD5) + download_model(LDM_ENCODE_MODEL_URL, LDM_ENCODE_MODEL_MD5) + @staticmethod def is_downloaded() -> bool: model_paths = [ @@ -270,7 +277,7 @@ class LDM(InpaintModel): return all([os.path.exists(it) for it in model_paths]) @torch.cuda.amp.autocast() - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """ image: [H, W, C] RGB mask: [H, W, 1] diff --git a/lama_cleaner/model/manga.py b/iopaint/model/manga.py similarity index 86% rename from lama_cleaner/model/manga.py rename to iopaint/model/manga.py index f6e27e5..1f58251 100644 --- a/lama_cleaner/model/manga.py +++ b/iopaint/model/manga.py @@ -7,9 +7,9 @@ import torch import time from loguru import logger -from lama_cleaner.helper import get_cache_path_by_url, load_jit_model -from lama_cleaner.model.base import InpaintModel -from lama_cleaner.schema import Config +from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model +from .base import InpaintModel +from iopaint.schema import InpaintRequest MANGA_INPAINTOR_MODEL_URL = os.environ.get( @@ -32,6 +32,7 @@ MANGA_LINE_MODEL_MD5 = os.environ.get( class Manga(InpaintModel): name = "manga" pad_mod = 16 + is_erase_model = True def init_model(self, device, **kwargs): self.inpaintor_model = load_jit_model( @@ -42,6 +43,11 @@ class Manga(InpaintModel): ) self.seed = 42 + @staticmethod + def download(): + download_model(MANGA_INPAINTOR_MODEL_URL, MANGA_INPAINTOR_MODEL_MD5) + download_model(MANGA_LINE_MODEL_URL, MANGA_LINE_MODEL_MD5) + @staticmethod def is_downloaded() -> bool: model_paths = [ @@ -50,7 +56,7 @@ class Manga(InpaintModel): ] return all([os.path.exists(it) for it in model_paths]) - def forward(self, image, mask, config: Config): + def forward(self, image, 
mask, config: InpaintRequest): """ image: [H, W, C] RGB mask: [H, W, 1] diff --git a/lama_cleaner/model/mat.py b/iopaint/model/mat.py similarity index 97% rename from lama_cleaner/model/mat.py rename to iopaint/model/mat.py index 3e09bf4..0c5360f 100644 --- a/lama_cleaner/model/mat.py +++ b/iopaint/model/mat.py @@ -8,9 +8,15 @@ import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint -from lama_cleaner.helper import load_model, get_cache_path_by_url, norm_img -from lama_cleaner.model.base import InpaintModel -from lama_cleaner.model.utils import ( +from iopaint.helper import ( + load_model, + get_cache_path_by_url, + norm_img, + download_model, +) +from iopaint.schema import InpaintRequest +from .base import InpaintModel +from .utils import ( setup_filter, Conv2dLayer, FullyConnectedLayer, @@ -23,7 +29,6 @@ from lama_cleaner.model.utils import ( normalize_2nd_moment, set_seed, ) -from lama_cleaner.schema import Config class ModulatedConv2d(nn.Module): @@ -52,7 +57,7 @@ class ModulatedConv2d(nn.Module): ) self.out_channels = out_channels self.kernel_size = kernel_size - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2)) self.padding = self.kernel_size // 2 self.up = up self.down = down @@ -213,7 +218,7 @@ class DecBlockFirst(nn.Module): super().__init__() self.fc = FullyConnectedLayer( in_features=in_channels * 2, - out_features=in_channels * 4 ** 2, + out_features=in_channels * 4**2, activation=activation, ) self.conv = StyleConv( @@ -312,7 +317,7 @@ class DecBlock(nn.Module): in_channels=in_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, up=2, use_noise=use_noise, @@ -323,7 +328,7 @@ class DecBlock(nn.Module): in_channels=out_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, use_noise=use_noise, activation=activation, @@ -507,7 +512,7 @@ class Discriminator(torch.nn.Module): self.img_channels = img_channels resolution_log2 = int(np.log2(img_resolution)) - assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4 + assert img_resolution == 2**resolution_log2 and img_resolution >= 4 self.resolution_log2 = resolution_log2 def nf(stage): @@ -543,7 +548,7 @@ class Discriminator(torch.nn.Module): ) self.Dis = nn.Sequential(*Dis) - self.fc0 = FullyConnectedLayer(nf(2) * 4 ** 2, nf(2), activation=activation) + self.fc0 = FullyConnectedLayer(nf(2) * 4**2, nf(2), activation=activation) self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim) def forward(self, images_in, masks_in, c): @@ -562,7 +567,7 @@ class Discriminator(torch.nn.Module): def nf(stage, channel_base=32768, channel_decay=1.0, channel_max=512): NF = {512: 64, 256: 128, 128: 256, 64: 512, 32: 512, 16: 512, 8: 512, 4: 512} - return NF[2 ** stage] + return NF[2**stage] class Mlp(nn.Module): @@ -659,7 +664,7 @@ class Conv2dLayerPartial(nn.Module): ) self.weight_maskUpdater = torch.ones(1, 1, kernel_size, kernel_size) - self.slide_winsize = kernel_size ** 2 + self.slide_winsize = kernel_size**2 self.stride = down self.padding = kernel_size // 2 if kernel_size % 2 == 1 else 0 @@ -715,7 +720,7 @@ class WindowAttention(nn.Module): self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 + self.scale = qk_scale or head_dim**-0.5 self.q = FullyConnectedLayer(in_features=dim, 
out_features=dim) self.k = FullyConnectedLayer(in_features=dim, out_features=dim) @@ -1211,7 +1216,7 @@ class Encoder(nn.Module): self.resolution = [] for idx, i in enumerate(range(res_log2, 3, -1)): # from input size to 16x16 - res = 2 ** i + res = 2**i self.resolution.append(res) if i == res_log2: block = EncFromRGB(img_channels * 2 + 1, nf(i), activation) @@ -1296,7 +1301,7 @@ class DecBlockFirstV2(nn.Module): in_channels=in_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, use_noise=use_noise, activation=activation, @@ -1341,7 +1346,7 @@ class DecBlock(nn.Module): in_channels=in_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, up=2, use_noise=use_noise, @@ -1352,7 +1357,7 @@ class DecBlock(nn.Module): in_channels=out_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, use_noise=use_noise, activation=activation, @@ -1389,7 +1394,7 @@ class Decoder(nn.Module): for res in range(5, res_log2 + 1): setattr( self, - "Dec_%dx%d" % (2 ** res, 2 ** res), + "Dec_%dx%d" % (2**res, 2**res), DecBlock( res, nf(res - 1), @@ -1406,7 +1411,7 @@ class Decoder(nn.Module): def forward(self, x, ws, gs, E_features, noise_mode="random"): x, img = self.Dec_16x16(x, ws, gs, E_features, noise_mode=noise_mode) for res in range(5, self.res_log2 + 1): - block = getattr(self, "Dec_%dx%d" % (2 ** res, 2 ** res)) + block = getattr(self, "Dec_%dx%d" % (2**res, 2**res)) x, img = block(x, img, ws, gs, E_features, noise_mode=noise_mode) return img @@ -1431,7 +1436,7 @@ class DecStyleBlock(nn.Module): in_channels=in_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, up=2, use_noise=use_noise, @@ -1442,7 +1447,7 @@ class DecStyleBlock(nn.Module): in_channels=out_channels, out_channels=out_channels, style_dim=style_dim, - resolution=2 ** res, + resolution=2**res, kernel_size=3, use_noise=use_noise, activation=activation, @@ -1640,7 +1645,7 @@ class SynthesisNet(nn.Module): ): super().__init__() resolution_log2 = int(np.log2(img_resolution)) - assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4 + assert img_resolution == 2**resolution_log2 and img_resolution >= 4 self.num_layers = resolution_log2 * 2 - 3 * 2 self.img_resolution = img_resolution @@ -1781,7 +1786,7 @@ class Discriminator(torch.nn.Module): self.img_channels = img_channels resolution_log2 = int(np.log2(img_resolution)) - assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4 + assert img_resolution == 2**resolution_log2 and img_resolution >= 4 self.resolution_log2 = resolution_log2 if cmap_dim == None: @@ -1812,7 +1817,7 @@ class Discriminator(torch.nn.Module): ) self.Dis = nn.Sequential(*Dis) - self.fc0 = FullyConnectedLayer(nf(2) * 4 ** 2, nf(2), activation=activation) + self.fc0 = FullyConnectedLayer(nf(2) * 4**2, nf(2), activation=activation) self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim) # for 64x64 @@ -1837,7 +1842,7 @@ class Discriminator(torch.nn.Module): self.Dis_stg1 = nn.Sequential(*Dis_stg1) self.fc0_stg1 = FullyConnectedLayer( - nf(2) // 2 * 4 ** 2, nf(2) // 2, activation=activation + nf(2) // 2 * 4**2, nf(2) // 2, activation=activation ) self.fc1_stg1 = FullyConnectedLayer( nf(2) // 2, 1 if cmap_dim == 0 else cmap_dim @@ -1875,6 +1880,7 @@ class MAT(InpaintModel): min_size = 512 pad_mod = 512 pad_to_square = True + is_erase_model = 
True def init_model(self, device, **kwargs): seed = 240 # pick up a random number @@ -1898,11 +1904,15 @@ class MAT(InpaintModel): self.label = torch.zeros([1, self.model.c_dim], device=device).to(self.torch_dtype) # fmt: on + @staticmethod + def download(): + download_model(MAT_MODEL_URL, MAT_MODEL_MD5) + @staticmethod def is_downloaded() -> bool: return os.path.exists(get_cache_path_by_url(MAT_MODEL_URL)) - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """Input images and output images have same size images: [H, W, C] RGB masks: [H, W] mask area == 255 diff --git a/iopaint/model/mi_gan.py b/iopaint/model/mi_gan.py new file mode 100644 index 0000000..f1ce25f --- /dev/null +++ b/iopaint/model/mi_gan.py @@ -0,0 +1,110 @@ +import os + +import cv2 +import torch + +from iopaint.helper import ( + load_jit_model, + download_model, + get_cache_path_by_url, + boxes_from_mask, + resize_max_size, + norm_img, +) +from .base import InpaintModel +from iopaint.schema import InpaintRequest + +MIGAN_MODEL_URL = os.environ.get( + "MIGAN_MODEL_URL", + "https://github.com/Sanster/models/releases/download/migan/migan_traced.pt", +) +MIGAN_MODEL_MD5 = os.environ.get("MIGAN_MODEL_MD5", "76eb3b1a71c400ee3290524f7a11b89c") + + +class MIGAN(InpaintModel): + name = "migan" + min_size = 512 + pad_mod = 512 + pad_to_square = True + is_erase_model = True + + def init_model(self, device, **kwargs): + self.model = load_jit_model(MIGAN_MODEL_URL, device, MIGAN_MODEL_MD5).eval() + + @staticmethod + def download(): + download_model(MIGAN_MODEL_URL, MIGAN_MODEL_MD5) + + @staticmethod + def is_downloaded() -> bool: + return os.path.exists(get_cache_path_by_url(MIGAN_MODEL_URL)) + + @torch.no_grad() + def __call__(self, image, mask, config: InpaintRequest): + """ + images: [H, W, C] RGB, not normalized + masks: [H, W] + return: BGR IMAGE + """ + if image.shape[0] == 512 and image.shape[1] == 512: + return self._pad_forward(image, mask, config) + + boxes = boxes_from_mask(mask) + crop_result = [] + config.hd_strategy_crop_margin = 128 + for box in boxes: + crop_image, crop_mask, crop_box = self._crop_box(image, mask, box, config) + origin_size = crop_image.shape[:2] + resize_image = resize_max_size(crop_image, size_limit=512) + resize_mask = resize_max_size(crop_mask, size_limit=512) + inpaint_result = self._pad_forward(resize_image, resize_mask, config) + + # only paste masked area result + inpaint_result = cv2.resize( + inpaint_result, + (origin_size[1], origin_size[0]), + interpolation=cv2.INTER_CUBIC, + ) + + original_pixel_indices = crop_mask < 127 + inpaint_result[original_pixel_indices] = crop_image[:, :, ::-1][ + original_pixel_indices + ] + + crop_result.append((inpaint_result, crop_box)) + + inpaint_result = image[:, :, ::-1].copy() + for crop_image, crop_box in crop_result: + x1, y1, x2, y2 = crop_box + inpaint_result[y1:y2, x1:x2, :] = crop_image + + return inpaint_result + + def forward(self, image, mask, config: InpaintRequest): + """Input images and output images have same size + images: [H, W, C] RGB + masks: [H, W] mask area == 255 + return: BGR IMAGE + """ + + image = norm_img(image) # [0, 1] + image = image * 2 - 1 # [0, 1] -> [-1, 1] + mask = (mask > 120) * 255 + mask = norm_img(mask) + + image = torch.from_numpy(image).unsqueeze(0).to(self.device) + mask = torch.from_numpy(mask).unsqueeze(0).to(self.device) + + erased_img = image * (1 - mask) + input_image = torch.cat([0.5 - mask, erased_img], dim=1) + + output = self.model(input_image) + output = 
( + (output.permute(0, 2, 3, 1) * 127.5 + 127.5) + .round() + .clamp(0, 255) + .to(torch.uint8) + ) + output = output[0].cpu().numpy() + cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return cur_res diff --git a/lama_cleaner/model/opencv2.py b/iopaint/model/opencv2.py similarity index 78% rename from lama_cleaner/model/opencv2.py rename to iopaint/model/opencv2.py index e0618dd..de47209 100644 --- a/lama_cleaner/model/opencv2.py +++ b/iopaint/model/opencv2.py @@ -1,6 +1,6 @@ import cv2 -from lama_cleaner.model.base import InpaintModel -from lama_cleaner.schema import Config +from .base import InpaintModel +from iopaint.schema import InpaintRequest flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA} @@ -8,12 +8,13 @@ flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA} class OpenCV2(InpaintModel): name = "cv2" pad_mod = 1 + is_erase_model = True @staticmethod def is_downloaded() -> bool: return True - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """Input image and output image have same size image: [H, W, C] RGB mask: [H, W, 1] diff --git a/iopaint/model/original_sd_configs/__init__.py b/iopaint/model/original_sd_configs/__init__.py new file mode 100644 index 0000000..23896a7 --- /dev/null +++ b/iopaint/model/original_sd_configs/__init__.py @@ -0,0 +1,19 @@ +from pathlib import Path +from typing import Dict + +CURRENT_DIR = Path(__file__).parent.absolute() + + +def get_config_files() -> Dict[str, Path]: + """ + - `v1`: Config file for Stable Diffusion v1 + - `v2`: Config file for Stable Diffusion v2 + - `xl`: Config file for Stable Diffusion XL + - `xl_refiner`: Config file for Stable Diffusion XL Refiner + """ + return { + "v1": CURRENT_DIR / "v1-inference.yaml", + "v2": CURRENT_DIR / "v2-inference-v.yaml", + "xl": CURRENT_DIR / "sd_xl_base.yaml", + "xl_refiner": CURRENT_DIR / "sd_xl_refiner.yaml", + } diff --git a/iopaint/model/original_sd_configs/sd_xl_base.yaml b/iopaint/model/original_sd_configs/sd_xl_base.yaml new file mode 100644 index 0000000..6047379 --- /dev/null +++ b/iopaint/model/original_sd_configs/sd_xl_base.yaml @@ -0,0 +1,93 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.13025 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + adm_in_channels: 2816 + num_classes: sequential + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [4, 2] + num_res_blocks: 2 + channel_mult: [1, 2, 4] + num_head_channels: 64 + use_linear_in_transformer: True + transformer_depth: [1, 2, 10] + context_dim: 2048 + spatial_transformer_attn_type: softmax-xformers + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenCLIPEmbedder + params: + layer: hidden + layer_idx: 11 + + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2 + params: + arch: ViT-bigG-14 + version: laion2b_s39b_b160k + freeze: True + layer: penultimate + always_return_pooled: True + legacy: 
False + + - is_trainable: False + input_key: original_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: crop_coords_top_left + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: target_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + attn_type: vanilla-xformers + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity diff --git a/iopaint/model/original_sd_configs/sd_xl_refiner.yaml b/iopaint/model/original_sd_configs/sd_xl_refiner.yaml new file mode 100644 index 0000000..2d5ab44 --- /dev/null +++ b/iopaint/model/original_sd_configs/sd_xl_refiner.yaml @@ -0,0 +1,86 @@ +model: + target: sgm.models.diffusion.DiffusionEngine + params: + scale_factor: 0.13025 + disable_first_stage_autocast: True + + denoiser_config: + target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser + params: + num_idx: 1000 + + scaling_config: + target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling + discretization_config: + target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization + + network_config: + target: sgm.modules.diffusionmodules.openaimodel.UNetModel + params: + adm_in_channels: 2560 + num_classes: sequential + use_checkpoint: True + in_channels: 4 + out_channels: 4 + model_channels: 384 + attention_resolutions: [4, 2] + num_res_blocks: 2 + channel_mult: [1, 2, 4, 4] + num_head_channels: 64 + use_linear_in_transformer: True + transformer_depth: 4 + context_dim: [1280, 1280, 1280, 1280] + spatial_transformer_attn_type: softmax-xformers + + conditioner_config: + target: sgm.modules.GeneralConditioner + params: + emb_models: + - is_trainable: False + input_key: txt + target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2 + params: + arch: ViT-bigG-14 + version: laion2b_s39b_b160k + legacy: False + freeze: True + layer: penultimate + always_return_pooled: True + + - is_trainable: False + input_key: original_size_as_tuple + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: crop_coords_top_left + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + - is_trainable: False + input_key: aesthetic_score + target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND + params: + outdim: 256 + + first_stage_config: + target: sgm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + attn_type: vanilla-xformers + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: [1, 2, 4, 4] + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity diff --git a/iopaint/model/original_sd_configs/v1-inference.yaml b/iopaint/model/original_sd_configs/v1-inference.yaml new file mode 100644 index 0000000..d4effe5 --- /dev/null +++ b/iopaint/model/original_sd_configs/v1-inference.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 
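+    # linear_start / linear_end above define the DDPM beta (noise) schedule used over the 1000 timesteps below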
+ log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/iopaint/model/original_sd_configs/v2-inference-v.yaml b/iopaint/model/original_sd_configs/v2-inference-v.yaml new file mode 100644 index 0000000..8ec8dfb --- /dev/null +++ b/iopaint/model/original_sd_configs/v2-inference-v.yaml @@ -0,0 +1,68 @@ +model: + base_learning_rate: 1.0e-4 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + parameterization: "v" + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False # we set this to false because this is an inference only config + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" diff --git a/iopaint/model/paint_by_example.py b/iopaint/model/paint_by_example.py new file mode 100644 index 0000000..bf1e5b7 --- /dev/null +++ b/iopaint/model/paint_by_example.py @@ -0,0 +1,68 @@ +import PIL +import PIL.Image +import cv2 +import torch +from loguru import logger + +from iopaint.helper import decode_base64_to_image +from .base import DiffusionInpaintModel +from iopaint.schema 
import InpaintRequest +from .utils import get_torch_dtype, enable_low_mem, is_local_files_only + + +class PaintByExample(DiffusionInpaintModel): + name = "Fantasy-Studio/Paint-by-Example" + pad_mod = 8 + min_size = 512 + + def init_model(self, device: torch.device, **kwargs): + from diffusers import DiffusionPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + model_kwargs = { + "local_files_only": is_local_files_only(**kwargs), + } + + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Paint By Example Model NSFW checker") + model_kwargs.update( + dict(safety_checker=None, requires_safety_checker=False) + ) + + self.model = DiffusionPipeline.from_pretrained( + self.name, torch_dtype=torch_dtype, **model_kwargs + ) + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + # TODO: gpu_id + if kwargs.get("cpu_offload", False) and use_gpu: + self.model.image_encoder = self.model.image_encoder.to(device) + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + if config.paint_by_example_example_image is None: + raise ValueError("paint_by_example_example_image is required") + example_image, _, _ = decode_base64_to_image( + config.paint_by_example_example_image + ) + output = self.model( + image=PIL.Image.fromarray(image), + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + example_image=PIL.Image.fromarray(example_image), + num_inference_steps=config.sd_steps, + guidance_scale=config.sd_guidance_scale, + negative_prompt="out of frame, lowres, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, disfigured, gross proportions, malformed limbs, watermark, signature", + output_type="np.array", + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/lama_cleaner/model/plms_sampler.py b/iopaint/model/plms_sampler.py similarity index 99% rename from lama_cleaner/model/plms_sampler.py rename to iopaint/model/plms_sampler.py index d9c0416..131a8f4 100644 --- a/lama_cleaner/model/plms_sampler.py +++ b/iopaint/model/plms_sampler.py @@ -1,7 +1,7 @@ # From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py import torch import numpy as np -from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like +from .utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like from tqdm import tqdm diff --git a/iopaint/model/power_paint/__init__.py b/iopaint/model/power_paint/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/iopaint/model/power_paint/pipeline_powerpaint.py b/iopaint/model/power_paint/pipeline_powerpaint.py new file mode 100644 index 0000000..13c1d27 --- /dev/null +++ b/iopaint/model/power_paint/pipeline_powerpaint.py @@ -0,0 +1,1243 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer +from diffusers.configuration_utils import FrozenDict +from diffusers.image_processor import VaeImageProcessor +from diffusers.loaders import ( + FromSingleFileMixin, + LoraLoaderMixin, + TextualInversionLoaderMixin, +) +from diffusers.models import ( + AsymmetricAutoencoderKL, + AutoencoderKL, + UNet2DConditionModel, +) +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + deprecate, + is_accelerate_available, + is_accelerate_version, + logging, +) +from diffusers.utils.torch_utils import randn_tensor +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def prepare_mask_and_masked_image( + image, mask, height, width, return_image: bool = False +): + """ + Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. 
+ """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch single image + if image.ndim == 3: + assert ( + image.shape[0] == 3 + ), "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert ( + image.ndim == 4 and mask.ndim == 4 + ), "Image and Mask must have 4 dimensions" + assert ( + image.shape[-2:] == mask.shape[-2:] + ), "Image and Mask must have the same spatial dimensions" + assert ( + image.shape[0] == mask.shape[0] + ), "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError( + f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not" + ) + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [ + i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image + ] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * (mask < 0.5) + + # n.b. ensure backwards compatibility as old function does not return image + if return_image: + return mask, masked_image, image + + return mask, masked_image + + +class StableDiffusionInpaintPipeline( + DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: Union[AutoencoderKL, AsymmetricAutoencoderKL], + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if ( + hasattr(scheduler.config, "steps_offset") + and scheduler.config.steps_offset != 1 + ): + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate( + "steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False + ) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if ( + hasattr(scheduler.config, "skip_prk_steps") + and scheduler.config.skip_prk_steps is False + ): + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration" + " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" + " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" + " incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face" + " Hub, it would be very nice if you could open a Pull request for the" + " `scheduler/scheduler_config.json` file" + ) + deprecate( + "skip_prk_steps not set", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + new_config = dict(scheduler.config) + new_config["skip_prk_steps"] = True + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr( + unet.config, "_diffusers_version" + ) and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse( + "0.9.0.dev0" + ) + is_unet_sample_size_less_64 = ( + hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + ) + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate( + "sample_size<64", "1.0.0", deprecation_message, standard_warn=False + ) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4 + if unet.config.in_channels != 9: + logger.info( + f"You have loaded a UNet with {unet.config.in_channels} input channels which." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a + time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. + Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the + iterative execution of the `unet`. + """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError( + "`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." + ) + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + hook = None + for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: + _, hook = cpu_offload_with_hook( + cpu_offloaded_model, device, prev_module_hook=hook + ) + + if self.safety_checker is not None: + _, hook = cpu_offload_with_hook( + self.safety_checker, device, prev_module_hook=hook + ) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + promptA, + promptB, + t, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA=None, + negative_promptB=None, + t_nag=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + prompt = promptA + negative_prompt = negative_promptA + + if promptA is not None and isinstance(promptA, str): + batch_size = 1 + elif promptA is not None and isinstance(promptA, list): + batch_size = len(promptA) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + promptA = self.maybe_convert_prompt(promptA, self.tokenizer) + + text_inputsA = self.tokenizer( + promptA, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputsB = self.tokenizer( + promptB, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_idsA = text_inputsA.input_ids + text_input_idsB = text_inputsB.input_ids + untruncated_ids = self.tokenizer( + promptA, padding="longest", return_tensors="pt" + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_idsA.shape[ + -1 + ] and not torch.equal(text_input_idsA, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = text_inputsA.attention_mask.to(device) + else: + attention_mask = None + + # print("text_input_idsA: ",text_input_idsA) + # print("text_input_idsB: ",text_input_idsB) + # print('t: ',t) + + prompt_embedsA = self.text_encoder( + text_input_idsA.to(device), + attention_mask=attention_mask, + ) + prompt_embedsA = prompt_embedsA[0] + + prompt_embedsB = self.text_encoder( + text_input_idsB.to(device), + attention_mask=attention_mask, + ) + prompt_embedsB = prompt_embedsB[0] + prompt_embeds = prompt_embedsA * (t) + (1 - t) * prompt_embedsB + # print("prompt_embeds: ",prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view( + bs_embed * num_images_per_prompt, seq_len, -1 + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokensA: List[str] + uncond_tokensB: List[str] + if negative_prompt is None: + uncond_tokensA = [""] * batch_size + uncond_tokensB = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokensA = [negative_promptA] + uncond_tokensB = [negative_promptB] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokensA = negative_promptA + uncond_tokensB = negative_promptB + + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokensA = self.maybe_convert_prompt( + uncond_tokensA, self.tokenizer + ) + uncond_tokensB = self.maybe_convert_prompt( + uncond_tokensB, self.tokenizer + ) + + max_length = prompt_embeds.shape[1] + uncond_inputA = self.tokenizer( + uncond_tokensA, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_inputB = self.tokenizer( + uncond_tokensB, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = uncond_inputA.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embedsA = self.text_encoder( + uncond_inputA.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embedsB = self.text_encoder( + uncond_inputB.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = ( + negative_prompt_embedsA[0] * (t_nag) + + (1 - t_nag) * negative_prompt_embedsB[0] + ) + + # negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to( + dtype=prompt_embeds_dtype, device=device + ) + + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_images_per_prompt, 1 + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + # For classifier free guidance, we need to do two forward passes. 
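+                # When the negative embeddings are computed here (rather than passed in), they
+                # are already the PowerPaint blend
+                # t_nag * encode(negative_promptA) + (1 - t_nag) * encode(negative_promptB),
+                # mirroring the promptA/promptB blend used for the conditional embeddings above.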
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # print("prompt_embeds: ",prompt_embeds) + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess( + image, output_type="pil" + ) + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor( + feature_extractor_input, return_tensors="pt" + ).to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if strength < 0 or strength > 1: + raise ValueError( + f"The value of strength should in [0.0, 1.0] but is {strength}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError( + f"`height` and `width` have to be divisible by 8 but are {height} and {width}." + ) + + if (callback_steps is None) or ( + callback_steps is not None + and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and ( + not isinstance(prompt, str) and not isinstance(prompt, list) + ): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. then initialise the latents to noise, else initial to image + noise + latents = ( + noise + if is_strength_max + else self.scheduler.add_noise(image_latents, noise, timestep) + ) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = ( + latents * self.scheduler.init_noise_sigma + if is_strength_max + else latents + ) + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample( + generator=generator[i] + ) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.vae.encode(image).latent_dist.sample( + generator=generator + ) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, + mask, + masked_image, + batch_size, + height, + width, + dtype, + device, + generator, + do_classifier_free_guidance, + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: 
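+                # a smaller mask batch is tiled up to the requested batch size below, which is
+                # only well defined when it divides `batch_size` evenly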
+ raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." + ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) + if do_classifier_free_guidance + else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + @torch.no_grad() + def __call__( + self, + promptA: Union[str, List[str]] = None, + promptB: Union[str, List[str]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 1.0, + tradoff: float = 1.0, + tradoff_nag: float = 1.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_promptA: Optional[Union[str, List[str]]] = None, + negative_promptB: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + task_class: Union[torch.Tensor, float, int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked + out with `mask_image` and repainted according to `prompt`). + mask_image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to mask `image`. 
White pixels in the mask are repainted + while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel + (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the + expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. 
+ callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipe = StableDiffusionInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + prompt = promptA + negative_prompt = negative_promptA + # 1. Check inputs + self.check_inputs( + prompt, + height, + width, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) + if cross_attention_kwargs is not None + else None + ) + prompt_embeds = self._encode_prompt( + promptA, + promptB, + tradoff, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA, + negative_promptB, + tradoff_nag, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. Preprocess mask and image + mask, masked_image, init_image = prepare_mask_and_masked_image( + image, mask_image, height, width, return_image=True + ) + mask_condition = mask.clone() + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if ( + num_channels_latents + num_channels_mask + num_channels_masked_image + != self.unet.config.in_channels + ): + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + + # 9. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * 2) if do_classifier_free_guidance else latents + ) + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t + ) + + if num_channels_unet == 9: + latent_model_input = torch.cat( + [latent_model_input, mask, masked_image_latents], dim=1 + ) + + # predict the noise residual + if task_class is not None: + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + task_class=task_class, + )[0] + else: + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, **extra_step_kwargs, return_dict=False + )[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents[:1] + init_mask = mask[:1] + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = ( + 1 - init_mask + ) * init_latents_proper + init_mask * latents + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(self, i, t, {}) + + if not output_type == "latent": + condition_kwargs = {} + if isinstance(self.vae, AsymmetricAutoencoderKL): + init_image = init_image.to( + device=device, dtype=masked_image_latents.dtype + ) + init_image_condition = init_image.clone() + init_image = self._encode_vae_image(init_image, generator=generator) + mask_condition = mask_condition.to( + device=device, dtype=masked_image_latents.dtype + ) + condition_kwargs = { + "image": init_image_condition, + "mask": mask_condition, + } + image = self.vae.decode( + latents / self.vae.config.scaling_factor, + return_dict=False, + **condition_kwargs, + )[0] + image, has_nsfw_concept = self.run_safety_checker( + image, device, prompt_embeds.dtype + ) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess( + image, output_type=output_type, do_denormalize=do_denormalize + ) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput( + images=image, nsfw_content_detected=has_nsfw_concept + ) diff 
--git a/iopaint/model/power_paint/pipeline_powerpaint_controlnet.py b/iopaint/model/power_paint/pipeline_powerpaint_controlnet.py new file mode 100644 index 0000000..cba0f8f --- /dev/null +++ b/iopaint/model/power_paint/pipeline_powerpaint_controlnet.py @@ -0,0 +1,1775 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from diffusers.image_processor import VaeImageProcessor +from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin +from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + is_accelerate_available, + is_accelerate_version, + logging, + replace_example_docstring, +) +from diffusers.utils.torch_utils import randn_tensor,is_compiled_module +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from diffusers.pipelines.controlnet import MultiControlNetModel + + + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install transformers accelerate + >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" + ... ) + >>> init_image = init_image.resize((512, 512)) + + >>> generator = torch.Generator(device="cpu").manual_seed(1) + + >>> mask_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" + ... ) + >>> mask_image = mask_image.resize((512, 512)) + + + >>> def make_inpaint_condition(image, image_mask): + ... image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 + ... image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 + + ... assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size" + ... image[image_mask > 0.5] = -1.0 # set as masked pixel + ... image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) + ... image = torch.from_numpy(image) + ... return image + + + >>> control_image = make_inpaint_condition(init_image, mask_image) + + >>> controlnet = ControlNetModel.from_pretrained( + ... 
"lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> image = pipe( + ... "a handsome man with ray-ban sunglasses", + ... num_inference_steps=20, + ... generator=generator, + ... eta=1.0, + ... image=init_image, + ... mask_image=mask_image, + ... control_image=control_image, + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image +def prepare_mask_and_masked_image(image, mask, height, width, return_image=False): + """ + Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. 
+ """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * (mask < 0.5) + + # n.b. ensure backwards compatibility as old function does not return image + if return_image: + return mask, masked_image, image + + return mask, masked_image + + +class StableDiffusionControlNetInpaintPipeline( + DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + In addition the pipeline inherits the following loading methods: + - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] + + + + This pipeline can be used both with checkpoints that have been specifically fine-tuned for inpainting, such as + [runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting) + as well as default text-to-image stable diffusion checkpoints, such as + [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5). + Default text-to-image stable diffusion checkpoints might be preferable for controlnets that have been fine-tuned on + those, such as [lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint). + + + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
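+        The ControlNet is given its own offload hook because it is called in alternation with the `unet` inside
+        the denoising loop.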
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + hook = None + for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + if self.safety_checker is not None: + # the safety checker can offload the vae again + _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) + + # control net hook has be manually offloaded as it alternates with unet + cpu_offload_with_hook(self.controlnet, device) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + promptA, + promptB, + t, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA=None, + negative_promptB=None, + t_nag = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + prompt = promptA + negative_prompt = negative_promptA + + if promptA is not None and isinstance(promptA, str): + batch_size = 1 + elif promptA is not None and isinstance(promptA, list): + batch_size = len(promptA) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + promptA = self.maybe_convert_prompt(promptA, self.tokenizer) + + text_inputsA = self.tokenizer( + promptA, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputsB = self.tokenizer( + promptB, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_idsA = text_inputsA.input_ids + text_input_idsB = text_inputsB.input_ids + untruncated_ids = self.tokenizer(promptA, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_idsA.shape[-1] and not torch.equal( + text_input_idsA, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputsA.attention_mask.to(device) + else: + attention_mask = None + + # print("text_input_idsA: ",text_input_idsA) + # print("text_input_idsB: ",text_input_idsB) + # print('t: ',t) + + prompt_embedsA = self.text_encoder( + text_input_idsA.to(device), + attention_mask=attention_mask, + ) + prompt_embedsA = prompt_embedsA[0] + + prompt_embedsB = self.text_encoder( + text_input_idsB.to(device), + attention_mask=attention_mask, + ) + prompt_embedsB = prompt_embedsB[0] + prompt_embeds = prompt_embedsA*(t)+(1-t)*prompt_embedsB + # print("prompt_embeds: ",prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokensA: List[str] + uncond_tokensB: List[str] + if negative_prompt is None: + uncond_tokensA = [""] * batch_size + uncond_tokensB = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokensA = [negative_promptA] + uncond_tokensB = [negative_promptB] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokensA = negative_promptA + uncond_tokensB = negative_promptB + + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokensA = self.maybe_convert_prompt(uncond_tokensA, self.tokenizer) + uncond_tokensB = self.maybe_convert_prompt(uncond_tokensB, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_inputA = self.tokenizer( + uncond_tokensA, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_inputB = self.tokenizer( + uncond_tokensB, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_inputA.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embedsA = self.text_encoder( + uncond_inputA.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embedsB = self.text_encoder( + uncond_inputB.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embedsA[0]*(t_nag)+(1-t_nag)*negative_prompt_embedsB[0] + + # negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # print("prompt_embeds: ",prompt_embeds) + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + warnings.warn( + "The decode_latents method is deprecated and will be removed in a future version. 
Please" + " use VaeImageProcessor instead", + FutureWarning, + ) + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt, + image, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." 
+ ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _default_height_width(self, height, width, image): + # NOTE: It is possible that a list of images have different + # dimensions for each image, so just checking the first image + # is not _exactly_ correct, but it is simple. 
+ while isinstance(image, list): + image = image[0] + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[2] + + height = (height // 8) * 8 # round down to nearest multiple of 8 + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[3] + + width = (width // 8) * 8 # round down to nearest multiple of 8 + + return height, width + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + @torch.no_grad() + def predict_woControl( + self, + promptA: Union[str, List[str]] = None, + promptB: Union[str, List[str]] = None, + image: Union[torch.FloatTensor, PIL.Image.Image] = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 1.0, + tradoff: float = 1.0, + tradoff_nag: float = 1.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_promptA: Optional[Union[str, List[str]]] = None, + negative_promptB: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + task_class: Union[torch.Tensor, float, int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked + out with `mask_image` and repainted according to `prompt`). + mask_image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted + while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel + (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the + expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. 
`image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+ + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipe = StableDiffusionInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + prompt = promptA + negative_prompt = negative_promptA + # 1. Check inputs + self.check_inputs( + prompt, + height, + width, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds = self._encode_prompt( + promptA, + promptB, + tradoff, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA, + negative_promptB, + tradoff_nag, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 
50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. Preprocess mask and image + mask, masked_image, init_image = prepare_mask_and_masked_image( + image, mask_image, height, width, return_image=True + ) + mask_condition = mask.clone() + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + if task_class is not None: + noise_pred = self.unet( + sample = latent_model_input, + timestep = t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + task_class = task_class, + )[0] + else: + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents[:1] + init_mask = mask[:1] + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + if not output_type == "latent": + condition_kwargs = {} + if isinstance(self.vae, AsymmetricAutoencoderKL): + init_image = init_image.to(device=device, dtype=masked_image_latents.dtype) + init_image_condition = init_image.clone() + init_image = self._encode_vae_image(init_image, generator=generator) + mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype) + condition_kwargs = {"image": init_image_condition, "mask": mask_condition} + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + promptA: Union[str, List[str]] = None, + promptB: Union[str, List[str]] = None, + image: Union[torch.Tensor, PIL.Image.Image] = None, + 
mask_image: Union[torch.Tensor, PIL.Image.Image] = None, + control_image: Union[ + torch.FloatTensor, + PIL.Image.Image, + np.ndarray, + List[torch.FloatTensor], + List[PIL.Image.Image], + List[np.ndarray], + ] = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 1.0, + tradoff: float = 1.0, + tradoff_nag: float = 1.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_promptA: Optional[Union[str, List[str]]] = None, + negative_promptB: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.5, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, + `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`): + The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If + the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can + also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If + height and/or width are passed, `image` is resized according to them. If multiple ControlNets are + specified in init, images must be passed as a list such that each element of the list can be correctly + batched for input to a single controlnet. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 1.): + Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be + between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the + `strength`. The number of denoising steps depends on the amount of noise initially added. When + `strength` is 1, added noise will be maximum and the denoising process will run for the full number of + iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked + portion of the reference `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.5): + The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original unet. If multiple ControlNets are specified in init, you can set the + corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting + than for [`~StableDiffusionControlNetPipeline.__call__`]. 
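+                As an illustrative example, with two ControlNets passing `[1.0, 0.5]`
+                weights the first conditioning image twice as strongly as the second.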
+ guess_mode (`bool`, *optional*, defaults to `False`): + In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the controlnet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # 0. Default height and width to unet + height, width = self._default_height_width(height, width, image) + + prompt = promptA + negative_prompt = negative_promptA + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ + control_guidance_end + ] + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + control_image, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds = self._encode_prompt( + promptA, + promptB, + tradoff, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_promptA, + negative_promptB, + tradoff_nag, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + # 4. Preprocess mask and image - resizes image and mask w.r.t height and width + mask, masked_image, init_image = prepare_mask_and_masked_image( + image, mask_image, height, width, return_image=True + ) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + do_classifier_free_guidance, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and do_classifier_free_guidance: + # Infered ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents[:1] + init_mask = mask[:1] + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/iopaint/model/power_paint/power_paint.py b/iopaint/model/power_paint/power_paint.py new file mode 100644 index 0000000..f17a5a3 --- /dev/null +++ b/iopaint/model/power_paint/power_paint.py @@ -0,0 +1,101 @@ +from PIL import Image +import PIL.Image +import cv2 +import torch +from loguru import logger + +from ..base import DiffusionInpaintModel +from ..helper.cpu_text_encoder import CPUTextEncoderWrapper +from ..utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) +from iopaint.schema import InpaintRequest +from .powerpaint_tokenizer import add_task_to_prompt +from ...const import POWERPAINT_NAME + + +class PowerPaint(DiffusionInpaintModel): + name = POWERPAINT_NAME + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5" + + def init_model(self, device: 
torch.device, **kwargs): + from .pipeline_powerpaint import StableDiffusionInpaintPipeline + from .powerpaint_tokenizer import PowerPaintTokenizer + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + model_kwargs = {"local_files_only": is_local_files_only(**kwargs)} + if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + self.model = handle_from_pretrained_exceptions( + StableDiffusionInpaintPipeline.from_pretrained, + pretrained_model_name_or_path=self.name, + variant="fp16", + torch_dtype=torch_dtype, + **model_kwargs, + ) + self.model.tokenizer = PowerPaintTokenizer(self.model.tokenizer) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + promptA, promptB, negative_promptA, negative_promptB = add_task_to_prompt( + config.prompt, config.negative_prompt, config.powerpaint_task + ) + + output = self.model( + image=PIL.Image.fromarray(image), + promptA=promptA, + promptB=promptB, + tradoff=config.fitting_degree, + tradoff_nag=config.fitting_degree, + negative_promptA=negative_promptA, + negative_promptB=negative_promptB, + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + num_inference_steps=config.sd_steps, + strength=config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + callback_steps=1, + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/iopaint/model/power_paint/powerpaint_tokenizer.py b/iopaint/model/power_paint/powerpaint_tokenizer.py new file mode 100644 index 0000000..39d5cb7 --- /dev/null +++ b/iopaint/model/power_paint/powerpaint_tokenizer.py @@ -0,0 +1,540 @@ +import torch +import torch.nn as nn +import copy +import random +from typing import Any, List, Optional, Union +from transformers import CLIPTokenizer + +from iopaint.schema import PowerPaintTask + + +def add_task_to_prompt(prompt, negative_prompt, task: PowerPaintTask): + if task == PowerPaintTask.object_remove: + promptA = prompt + " P_ctxt" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + " P_obj" + negative_promptB = negative_prompt + " P_obj" + elif task == PowerPaintTask.shape_guided: + promptA = prompt + " P_shape" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + negative_promptB = negative_prompt + elif task == PowerPaintTask.outpainting: + promptA = prompt + " P_ctxt" + promptB = prompt + " P_ctxt" + negative_promptA = negative_prompt + " P_obj" + negative_promptB = negative_prompt + " P_obj" + else: + promptA = prompt + " 
P_obj" + promptB = prompt + " P_obj" + negative_promptA = negative_prompt + negative_promptB = negative_prompt + + return promptA, promptB, negative_promptA, negative_promptB + + +class PowerPaintTokenizer: + def __init__(self, tokenizer: CLIPTokenizer): + self.wrapped = tokenizer + self.token_map = {} + placeholder_tokens = ["P_ctxt", "P_shape", "P_obj"] + num_vec_per_token = 10 + for placeholder_token in placeholder_tokens: + output = [] + for i in range(num_vec_per_token): + ith_token = placeholder_token + f"_{i}" + output.append(ith_token) + self.token_map[placeholder_token] = output + + def __getattr__(self, name: str) -> Any: + if name == "wrapped": + return super().__getattr__("wrapped") + + try: + return getattr(self.wrapped, name) + except AttributeError: + try: + return super().__getattr__(name) + except AttributeError: + raise AttributeError( + "'name' cannot be found in both " + f"'{self.__class__.__name__}' and " + f"'{self.__class__.__name__}.tokenizer'." + ) + + def try_adding_tokens(self, tokens: Union[str, List[str]], *args, **kwargs): + """Attempt to add tokens to the tokenizer. + + Args: + tokens (Union[str, List[str]]): The tokens to be added. + """ + num_added_tokens = self.wrapped.add_tokens(tokens, *args, **kwargs) + assert num_added_tokens != 0, ( + f"The tokenizer already contains the token {tokens}. Please pass " + "a different `placeholder_token` that is not already in the " + "tokenizer." + ) + + def get_token_info(self, token: str) -> dict: + """Get the information of a token, including its start and end index in + the current tokenizer. + + Args: + token (str): The token to be queried. + + Returns: + dict: The information of the token, including its start and end + index in current tokenizer. + """ + token_ids = self.__call__(token).input_ids + start, end = token_ids[1], token_ids[-2] + 1 + return {"name": token, "start": start, "end": end} + + def add_placeholder_token( + self, placeholder_token: str, *args, num_vec_per_token: int = 1, **kwargs + ): + """Add placeholder tokens to the tokenizer. + + Args: + placeholder_token (str): The placeholder token to be added. + num_vec_per_token (int, optional): The number of vectors of + the added placeholder token. + *args, **kwargs: The arguments for `self.wrapped.add_tokens`. + """ + output = [] + if num_vec_per_token == 1: + self.try_adding_tokens(placeholder_token, *args, **kwargs) + output.append(placeholder_token) + else: + output = [] + for i in range(num_vec_per_token): + ith_token = placeholder_token + f"_{i}" + self.try_adding_tokens(ith_token, *args, **kwargs) + output.append(ith_token) + + for token in self.token_map: + if token in placeholder_token: + raise ValueError( + f"The tokenizer already has placeholder token {token} " + f"that can get confused with {placeholder_token} " + "keep placeholder tokens independent" + ) + self.token_map[placeholder_token] = output + + def replace_placeholder_tokens_in_text( + self, + text: Union[str, List[str]], + vector_shuffle: bool = False, + prop_tokens_to_load: float = 1.0, + ) -> Union[str, List[str]]: + """Replace the keywords in text with placeholder tokens. This function + will be called in `self.__call__` and `self.encode`. + + Args: + text (Union[str, List[str]]): The text to be processed. + vector_shuffle (bool, optional): Whether to shuffle the vectors. + Defaults to False. + prop_tokens_to_load (float, optional): The proportion of tokens to + be loaded. If 1.0, all tokens will be loaded. Defaults to 1.0. 
+ + Returns: + Union[str, List[str]]: The processed text. + """ + if isinstance(text, list): + output = [] + for i in range(len(text)): + output.append( + self.replace_placeholder_tokens_in_text( + text[i], vector_shuffle=vector_shuffle + ) + ) + return output + + for placeholder_token in self.token_map: + if placeholder_token in text: + tokens = self.token_map[placeholder_token] + tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)] + if vector_shuffle: + tokens = copy.copy(tokens) + random.shuffle(tokens) + text = text.replace(placeholder_token, " ".join(tokens)) + return text + + def replace_text_with_placeholder_tokens( + self, text: Union[str, List[str]] + ) -> Union[str, List[str]]: + """Replace the placeholder tokens in text with the original keywords. + This function will be called in `self.decode`. + + Args: + text (Union[str, List[str]]): The text to be processed. + + Returns: + Union[str, List[str]]: The processed text. + """ + if isinstance(text, list): + output = [] + for i in range(len(text)): + output.append(self.replace_text_with_placeholder_tokens(text[i])) + return output + + for placeholder_token, tokens in self.token_map.items(): + merged_tokens = " ".join(tokens) + if merged_tokens in text: + text = text.replace(merged_tokens, placeholder_token) + return text + + def __call__( + self, + text: Union[str, List[str]], + *args, + vector_shuffle: bool = False, + prop_tokens_to_load: float = 1.0, + **kwargs, + ): + """The call function of the wrapper. + + Args: + text (Union[str, List[str]]): The text to be tokenized. + vector_shuffle (bool, optional): Whether to shuffle the vectors. + Defaults to False. + prop_tokens_to_load (float, optional): The proportion of tokens to + be loaded. If 1.0, all tokens will be loaded. Defaults to 1.0 + *args, **kwargs: The arguments for `self.wrapped.__call__`. + """ + replaced_text = self.replace_placeholder_tokens_in_text( + text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load + ) + + return self.wrapped.__call__(replaced_text, *args, **kwargs) + + def encode(self, text: Union[str, List[str]], *args, **kwargs): + """Encode the passed text to token index. + + Args: + text (Union[str, List[str]]): The text to be encode. + *args, **kwargs: The arguments for `self.wrapped.__call__`. + """ + replaced_text = self.replace_placeholder_tokens_in_text(text) + return self.wrapped(replaced_text, *args, **kwargs) + + def decode( + self, token_ids, return_raw: bool = False, *args, **kwargs + ) -> Union[str, List[str]]: + """Decode the token index to text. + + Args: + token_ids: The token index to be decoded. + return_raw: Whether keep the placeholder token in the text. + Defaults to False. + *args, **kwargs: The arguments for `self.wrapped.decode`. + + Returns: + Union[str, List[str]]: The decoded text. + """ + text = self.wrapped.decode(token_ids, *args, **kwargs) + if return_raw: + return text + replaced_text = self.replace_text_with_placeholder_tokens(text) + return replaced_text + + +class EmbeddingLayerWithFixes(nn.Module): + """The revised embedding layer to support external embeddings. This design + of this class is inspired by https://github.com/AUTOMATIC1111/stable- + diffusion-webui/blob/22bcc7be428c94e9408f589966c2040187245d81/modules/sd_hi + jack.py#L224 # noqa. + + Args: + wrapped (nn.Emebdding): The embedding layer to be wrapped. + external_embeddings (Union[dict, List[dict]], optional): The external + embeddings added to this layer. Defaults to None. 
+ """ + + def __init__( + self, + wrapped: nn.Embedding, + external_embeddings: Optional[Union[dict, List[dict]]] = None, + ): + super().__init__() + self.wrapped = wrapped + self.num_embeddings = wrapped.weight.shape[0] + + self.external_embeddings = [] + if external_embeddings: + self.add_embeddings(external_embeddings) + + self.trainable_embeddings = nn.ParameterDict() + + @property + def weight(self): + """Get the weight of wrapped embedding layer.""" + return self.wrapped.weight + + def check_duplicate_names(self, embeddings: List[dict]): + """Check whether duplicate names exist in list of 'external + embeddings'. + + Args: + embeddings (List[dict]): A list of embedding to be check. + """ + names = [emb["name"] for emb in embeddings] + assert len(names) == len(set(names)), ( + "Found duplicated names in 'external_embeddings'. Name list: " f"'{names}'" + ) + + def check_ids_overlap(self, embeddings): + """Check whether overlap exist in token ids of 'external_embeddings'. + + Args: + embeddings (List[dict]): A list of embedding to be check. + """ + ids_range = [[emb["start"], emb["end"], emb["name"]] for emb in embeddings] + ids_range.sort() # sort by 'start' + # check if 'end' has overlapping + for idx in range(len(ids_range) - 1): + name1, name2 = ids_range[idx][-1], ids_range[idx + 1][-1] + assert ids_range[idx][1] <= ids_range[idx + 1][0], ( + f"Found ids overlapping between embeddings '{name1}' " f"and '{name2}'." + ) + + def add_embeddings(self, embeddings: Optional[Union[dict, List[dict]]]): + """Add external embeddings to this layer. + + Use case: + + >>> 1. Add token to tokenizer and get the token id. + >>> tokenizer = TokenizerWrapper('openai/clip-vit-base-patch32') + >>> # 'how much' in kiswahili + >>> tokenizer.add_placeholder_tokens('ngapi', num_vec_per_token=4) + >>> + >>> 2. Add external embeddings to the model. + >>> new_embedding = { + >>> 'name': 'ngapi', # 'how much' in kiswahili + >>> 'embedding': torch.ones(1, 15) * 4, + >>> 'start': tokenizer.get_token_info('kwaheri')['start'], + >>> 'end': tokenizer.get_token_info('kwaheri')['end'], + >>> 'trainable': False # if True, will registry as a parameter + >>> } + >>> embedding_layer = nn.Embedding(10, 15) + >>> embedding_layer_wrapper = EmbeddingLayerWithFixes(embedding_layer) + >>> embedding_layer_wrapper.add_embeddings(new_embedding) + >>> + >>> 3. Forward tokenizer and embedding layer! + >>> input_text = ['hello, ngapi!', 'hello my friend, ngapi?'] + >>> input_ids = tokenizer( + >>> input_text, padding='max_length', truncation=True, + >>> return_tensors='pt')['input_ids'] + >>> out_feat = embedding_layer_wrapper(input_ids) + >>> + >>> 4. Let's validate the result! + >>> assert (out_feat[0, 3: 7] == 2.3).all() + >>> assert (out_feat[2, 5: 9] == 2.3).all() + + Args: + embeddings (Union[dict, list[dict]]): The external embeddings to + be added. Each dict must contain the following 4 fields: 'name' + (the name of this embedding), 'embedding' (the embedding + tensor), 'start' (the start token id of this embedding), 'end' + (the end token id of this embedding). 
For example: + `{name: NAME, start: START, end: END, embedding: torch.Tensor}` + """ + if isinstance(embeddings, dict): + embeddings = [embeddings] + + self.external_embeddings += embeddings + self.check_duplicate_names(self.external_embeddings) + self.check_ids_overlap(self.external_embeddings) + + # set for trainable + added_trainable_emb_info = [] + for embedding in embeddings: + trainable = embedding.get("trainable", False) + if trainable: + name = embedding["name"] + embedding["embedding"] = torch.nn.Parameter(embedding["embedding"]) + self.trainable_embeddings[name] = embedding["embedding"] + added_trainable_emb_info.append(name) + + added_emb_info = [emb["name"] for emb in embeddings] + added_emb_info = ", ".join(added_emb_info) + print(f"Successfully add external embeddings: {added_emb_info}.", "current") + + if added_trainable_emb_info: + added_trainable_emb_info = ", ".join(added_trainable_emb_info) + print( + "Successfully add trainable external embeddings: " + f"{added_trainable_emb_info}", + "current", + ) + + def replace_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor: + """Replace external input ids to 0. + + Args: + input_ids (torch.Tensor): The input ids to be replaced. + + Returns: + torch.Tensor: The replaced input ids. + """ + input_ids_fwd = input_ids.clone() + input_ids_fwd[input_ids_fwd >= self.num_embeddings] = 0 + return input_ids_fwd + + def replace_embeddings( + self, input_ids: torch.Tensor, embedding: torch.Tensor, external_embedding: dict + ) -> torch.Tensor: + """Replace external embedding to the embedding layer. Noted that, in + this function we use `torch.cat` to avoid inplace modification. + + Args: + input_ids (torch.Tensor): The original token ids. Shape like + [LENGTH, ]. + embedding (torch.Tensor): The embedding of token ids after + `replace_input_ids` function. + external_embedding (dict): The external embedding to be replaced. + + Returns: + torch.Tensor: The replaced embedding. + """ + new_embedding = [] + + name = external_embedding["name"] + start = external_embedding["start"] + end = external_embedding["end"] + target_ids_to_replace = [i for i in range(start, end)] + ext_emb = external_embedding["embedding"] + + # do not need to replace + if not (input_ids == start).any(): + return embedding + + # start replace + s_idx, e_idx = 0, 0 + while e_idx < len(input_ids): + if input_ids[e_idx] == start: + if e_idx != 0: + # add embedding do not need to replace + new_embedding.append(embedding[s_idx:e_idx]) + + # check if the next embedding need to replace is valid + actually_ids_to_replace = [ + int(i) for i in input_ids[e_idx : e_idx + end - start] + ] + assert actually_ids_to_replace == target_ids_to_replace, ( + f"Invalid 'input_ids' in position: {s_idx} to {e_idx}. " + f"Expect '{target_ids_to_replace}' for embedding " + f"'{name}' but found '{actually_ids_to_replace}'." + ) + + new_embedding.append(ext_emb) + + s_idx = e_idx + end - start + e_idx = s_idx + 1 + else: + e_idx += 1 + + if e_idx == len(input_ids): + new_embedding.append(embedding[s_idx:e_idx]) + + return torch.cat(new_embedding, dim=0) + + def forward( + self, input_ids: torch.Tensor, external_embeddings: Optional[List[dict]] = None + ): + """The forward function. + + Args: + input_ids (torch.Tensor): The token ids shape like [bz, LENGTH] or + [LENGTH, ]. + external_embeddings (Optional[List[dict]]): The external + embeddings. If not passed, only `self.external_embeddings` + will be used. Defaults to None. + + input_ids: shape like [bz, LENGTH] or [LENGTH]. 
+ """ + assert input_ids.ndim in [1, 2] + if input_ids.ndim == 1: + input_ids = input_ids.unsqueeze(0) + + if external_embeddings is None and not self.external_embeddings: + return self.wrapped(input_ids) + + input_ids_fwd = self.replace_input_ids(input_ids) + inputs_embeds = self.wrapped(input_ids_fwd) + + vecs = [] + + if external_embeddings is None: + external_embeddings = [] + elif isinstance(external_embeddings, dict): + external_embeddings = [external_embeddings] + embeddings = self.external_embeddings + external_embeddings + + for input_id, embedding in zip(input_ids, inputs_embeds): + new_embedding = embedding + for external_embedding in embeddings: + new_embedding = self.replace_embeddings( + input_id, new_embedding, external_embedding + ) + vecs.append(new_embedding) + + return torch.stack(vecs) + + +def add_tokens( + tokenizer, + text_encoder, + placeholder_tokens: list, + initialize_tokens: list = None, + num_vectors_per_token: int = 1, +): + """Add token for training. + + # TODO: support add tokens as dict, then we can load pretrained tokens. + """ + if initialize_tokens is not None: + assert len(initialize_tokens) == len( + placeholder_tokens + ), "placeholder_token should be the same length as initialize_token" + for ii in range(len(placeholder_tokens)): + tokenizer.add_placeholder_token( + placeholder_tokens[ii], num_vec_per_token=num_vectors_per_token + ) + + # text_encoder.set_embedding_layer() + embedding_layer = text_encoder.text_model.embeddings.token_embedding + text_encoder.text_model.embeddings.token_embedding = EmbeddingLayerWithFixes( + embedding_layer + ) + embedding_layer = text_encoder.text_model.embeddings.token_embedding + + assert embedding_layer is not None, ( + "Do not support get embedding layer for current text encoder. " + "Please check your configuration." 
+ ) + initialize_embedding = [] + if initialize_tokens is not None: + for ii in range(len(placeholder_tokens)): + init_id = tokenizer(initialize_tokens[ii]).input_ids[1] + temp_embedding = embedding_layer.weight[init_id] + initialize_embedding.append( + temp_embedding[None, ...].repeat(num_vectors_per_token, 1) + ) + else: + for ii in range(len(placeholder_tokens)): + init_id = tokenizer("a").input_ids[1] + temp_embedding = embedding_layer.weight[init_id] + len_emb = temp_embedding.shape[0] + init_weight = (torch.rand(num_vectors_per_token, len_emb) - 0.5) / 2.0 + initialize_embedding.append(init_weight) + + # initialize_embedding = torch.cat(initialize_embedding,dim=0) + + token_info_all = [] + for ii in range(len(placeholder_tokens)): + token_info = tokenizer.get_token_info(placeholder_tokens[ii]) + token_info["embedding"] = initialize_embedding[ii] + token_info["trainable"] = True + token_info_all.append(token_info) + embedding_layer.add_embeddings(token_info_all) diff --git a/iopaint/model/sd.py b/iopaint/model/sd.py new file mode 100644 index 0000000..4f20a41 --- /dev/null +++ b/iopaint/model/sd.py @@ -0,0 +1,129 @@ +import PIL.Image +import cv2 +import torch +from loguru import logger + +from .base import DiffusionInpaintModel +from .helper.cpu_text_encoder import CPUTextEncoderWrapper +from .original_sd_configs import get_config_files +from .utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) +from iopaint.schema import InpaintRequest, ModelType + + +class SD(DiffusionInpaintModel): + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5" + + def init_model(self, device: torch.device, **kwargs): + from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + disable_nsfw_checker = kwargs["disable_nsfw"] or kwargs.get( + "cpu_offload", False + ) + if disable_nsfw_checker: + logger.info("Disable Stable Diffusion Model NSFW checker") + model_kwargs.update( + dict( + safety_checker=None, + feature_extractor=None, + requires_safety_checker=False, + ) + ) + + if self.model_info.is_single_file_diffusers: + if self.model_info.model_type == ModelType.DIFFUSERS_SD: + model_kwargs["num_in_channels"] = 4 + else: + model_kwargs["num_in_channels"] = 9 + + self.model = StableDiffusionInpaintPipeline.from_single_file( + self.model_id_or_path, + dtype=torch_dtype, + load_safety_checker=not disable_nsfw_checker, + config_files=get_config_files(), + **model_kwargs, + ) + else: + self.model = handle_from_pretrained_exceptions( + StableDiffusionInpaintPipeline.from_pretrained, + pretrained_model_name_or_path=self.model_id_or_path, + variant="fp16", + dtype=torch_dtype, + **model_kwargs, + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] 
RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + + output = self.model( + image=PIL.Image.fromarray(image), + prompt=config.prompt, + negative_prompt=config.negative_prompt, + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + num_inference_steps=config.sd_steps, + strength=config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output + + +class SD15(SD): + name = "runwayml/stable-diffusion-inpainting" + model_id_or_path = "runwayml/stable-diffusion-inpainting" + + +class Anything4(SD): + name = "Sanster/anything-4.0-inpainting" + model_id_or_path = "Sanster/anything-4.0-inpainting" + + +class RealisticVision14(SD): + name = "Sanster/Realistic_Vision_V1.4-inpainting" + model_id_or_path = "Sanster/Realistic_Vision_V1.4-inpainting" + + +class SD2(SD): + name = "stabilityai/stable-diffusion-2-inpainting" + model_id_or_path = "stabilityai/stable-diffusion-2-inpainting" diff --git a/iopaint/model/sdxl.py b/iopaint/model/sdxl.py new file mode 100644 index 0000000..2557e71 --- /dev/null +++ b/iopaint/model/sdxl.py @@ -0,0 +1,110 @@ +import os + +import PIL.Image +import cv2 +import torch +from diffusers import AutoencoderKL +from loguru import logger + +from iopaint.schema import InpaintRequest, ModelType + +from .base import DiffusionInpaintModel +from .helper.cpu_text_encoder import CPUTextEncoderWrapper +from .original_sd_configs import get_config_files +from .utils import ( + handle_from_pretrained_exceptions, + get_torch_dtype, + enable_low_mem, + is_local_files_only, +) + + +class SDXL(DiffusionInpaintModel): + name = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1" + pad_mod = 8 + min_size = 512 + lcm_lora_id = "latent-consistency/lcm-lora-sdxl" + model_id_or_path = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1" + + def init_model(self, device: torch.device, **kwargs): + from diffusers.pipelines import StableDiffusionXLInpaintPipeline + + use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) + + if self.model_info.model_type == ModelType.DIFFUSERS_SDXL: + num_in_channels = 4 + else: + num_in_channels = 9 + + if os.path.isfile(self.model_id_or_path): + self.model = StableDiffusionXLInpaintPipeline.from_single_file( + self.model_id_or_path, + dtype=torch_dtype, + num_in_channels=num_in_channels, + load_safety_checker=False, + config_files=get_config_files() + ) + else: + model_kwargs = { + **kwargs.get("pipe_components", {}), + "local_files_only": is_local_files_only(**kwargs), + } + if "vae" not in model_kwargs: + vae = AutoencoderKL.from_pretrained( + "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype + ) + model_kwargs["vae"] = vae + self.model = handle_from_pretrained_exceptions( + StableDiffusionXLInpaintPipeline.from_pretrained, + pretrained_model_name_or_path=self.model_id_or_path, + torch_dtype=torch_dtype, + variant="fp16", + **model_kwargs + ) + + enable_low_mem(self.model, kwargs.get("low_mem", False)) + + if kwargs.get("cpu_offload", False) and use_gpu: + logger.info("Enable sequential cpu offload") + self.model.enable_sequential_cpu_offload(gpu_id=0) + else: + self.model = self.model.to(device) + if kwargs["sd_cpu_textencoder"]: + logger.info("Run Stable Diffusion TextEncoder on 
CPU") + self.model.text_encoder = CPUTextEncoderWrapper( + self.model.text_encoder, torch_dtype + ) + self.model.text_encoder_2 = CPUTextEncoderWrapper( + self.model.text_encoder_2, torch_dtype + ) + + self.callback = kwargs.pop("callback", None) + + def forward(self, image, mask, config: InpaintRequest): + """Input image and output image have same size + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + return: BGR IMAGE + """ + self.set_scheduler(config) + + img_h, img_w = image.shape[:2] + + output = self.model( + image=PIL.Image.fromarray(image), + prompt=config.prompt, + negative_prompt=config.negative_prompt, + mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"), + num_inference_steps=config.sd_steps, + strength=0.999 if config.sd_strength == 1.0 else config.sd_strength, + guidance_scale=config.sd_guidance_scale, + output_type="np", + callback_on_step_end=self.callback, + height=img_h, + width=img_w, + generator=torch.manual_seed(config.sd_seed), + ).images[0] + + output = (output * 255).round().astype("uint8") + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output diff --git a/lama_cleaner/model/utils.py b/iopaint/model/utils.py similarity index 87% rename from lama_cleaner/model/utils.py rename to iopaint/model/utils.py index 998db43..95c1184 100644 --- a/lama_cleaner/model/utils.py +++ b/iopaint/model/utils.py @@ -1,5 +1,7 @@ +import gc import math import random +import traceback from typing import Any import torch @@ -15,9 +17,15 @@ from diffusers import ( EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, + LCMScheduler, + DPMSolverSinglestepScheduler, + KDPM2DiscreteScheduler, + KDPM2AncestralDiscreteScheduler, + HeunDiscreteScheduler, ) +from loguru import logger -from lama_cleaner.schema import SDSampler +from iopaint.schema import SDSampler from torch import conv2d, conv_transpose2d @@ -27,7 +35,7 @@ def make_beta_schedule( if schedule == "linear": betas = ( torch.linspace( - linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64 + linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64 ) ** 2 ) @@ -772,7 +780,7 @@ def conv2d_resample( f=f, up=up, padding=[px0, px1, py0, py1], - gain=up ** 2, + gain=up**2, flip_filter=flip_filter, ) return x @@ -814,7 +822,7 @@ def conv2d_resample( x=x, f=f, padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt], - gain=up ** 2, + gain=up**2, flip_filter=flip_filter, ) if down > 1: @@ -834,7 +842,7 @@ def conv2d_resample( f=(f if up > 1 else None), up=up, padding=[px0, px1, py0, py1], - gain=up ** 2, + gain=up**2, flip_filter=flip_filter, ) x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight) @@ -870,7 +878,7 @@ class Conv2dLayer(torch.nn.Module): self.register_buffer("resample_filter", setup_filter(resample_filter)) self.conv_clamp = conv_clamp self.padding = kernel_size // 2 - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) + self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2)) self.act_gain = activation_funcs[activation].def_gain memory_format = ( @@ -913,6 +921,7 @@ def torch_gc(): if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect() + gc.collect() def set_seed(seed: int): @@ -923,19 +932,98 @@ def set_seed(seed: int): def get_scheduler(sd_sampler, scheduler_config): - if sd_sampler == SDSampler.ddim: - return DDIMScheduler.from_config(scheduler_config) - elif sd_sampler == SDSampler.pndm: - return PNDMScheduler.from_config(scheduler_config) - elif sd_sampler == 
SDSampler.k_lms: - return LMSDiscreteScheduler.from_config(scheduler_config) - elif sd_sampler == SDSampler.k_euler: - return EulerDiscreteScheduler.from_config(scheduler_config) - elif sd_sampler == SDSampler.k_euler_a: - return EulerAncestralDiscreteScheduler.from_config(scheduler_config) - elif sd_sampler == SDSampler.dpm_plus_plus: - return DPMSolverMultistepScheduler.from_config(scheduler_config) - elif sd_sampler == SDSampler.uni_pc: - return UniPCMultistepScheduler.from_config(scheduler_config) + # https://github.com/huggingface/diffusers/issues/4167 + keys_to_pop = ["use_karras_sigmas", "algorithm_type"] + scheduler_config = dict(scheduler_config) + for it in keys_to_pop: + scheduler_config.pop(it, None) + + # fmt: off + samplers = { + SDSampler.dpm_plus_plus_2m: [DPMSolverMultistepScheduler], + SDSampler.dpm_plus_plus_2m_karras: [DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)], + SDSampler.dpm_plus_plus_2m_sde: [DPMSolverMultistepScheduler, dict(algorithm_type="sde-dpmsolver++")], + SDSampler.dpm_plus_plus_2m_sde_karras: [DPMSolverMultistepScheduler, dict(algorithm_type="sde-dpmsolver++", use_karras_sigmas=True)], + SDSampler.dpm_plus_plus_sde: [DPMSolverSinglestepScheduler], + SDSampler.dpm_plus_plus_sde_karras: [DPMSolverSinglestepScheduler, dict(use_karras_sigmas=True)], + SDSampler.dpm2: [KDPM2DiscreteScheduler], + SDSampler.dpm2_karras: [KDPM2DiscreteScheduler, dict(use_karras_sigmas=True)], + SDSampler.dpm2_a: [KDPM2AncestralDiscreteScheduler], + SDSampler.dpm2_a_karras: [KDPM2AncestralDiscreteScheduler, dict(use_karras_sigmas=True)], + SDSampler.euler: [EulerDiscreteScheduler], + SDSampler.euler_a: [EulerAncestralDiscreteScheduler], + SDSampler.heun: [HeunDiscreteScheduler], + SDSampler.lms: [LMSDiscreteScheduler], + SDSampler.lms_karras: [LMSDiscreteScheduler, dict(use_karras_sigmas=True)], + SDSampler.ddim: [DDIMScheduler], + SDSampler.pndm: [PNDMScheduler], + SDSampler.uni_pc: [UniPCMultistepScheduler], + SDSampler.lcm: [LCMScheduler], + } + # fmt: on + if sd_sampler in samplers: + if len(samplers[sd_sampler]) == 2: + scheduler_cls, kwargs = samplers[sd_sampler] + else: + scheduler_cls, kwargs = samplers[sd_sampler][0], {} + return scheduler_cls.from_config(scheduler_config, **kwargs) else: raise ValueError(sd_sampler) + + +def is_local_files_only(**kwargs) -> bool: + from huggingface_hub.constants import HF_HUB_OFFLINE + + return HF_HUB_OFFLINE or kwargs.get("local_files_only", False) + + +def handle_from_pretrained_exceptions(func, **kwargs): + try: + return func(**kwargs) + except ValueError as e: + if "You are trying to load the model files of the `variant=fp16`" in str(e): + logger.info("variant=fp16 not found, try revision=fp16") + return func(**{**kwargs, "variant": None, "revision": "fp16"}) + raise e + except OSError as e: + previous_traceback = traceback.format_exc() + if "RevisionNotFoundError: 404 Client Error." in previous_traceback: + logger.info("revision=fp16 not found, try revision=main") + return func(**{**kwargs, "variant": None, "revision": "main"}) + elif "Max retries exceeded" in previous_traceback: + logger.exception( + "Fetching model from HuggingFace failed. " + "If this is your first time downloading the model, you may need to set up proxy in terminal." + "If the model has already been downloaded, you can add --local-files-only when starting." 
+ ) + exit(-1) + raise e + except Exception as e: + raise e + + +def get_torch_dtype(device, no_half: bool): + device = str(device) + use_fp16 = not no_half + use_gpu = device == "cuda" + # https://github.com/huggingface/diffusers/issues/4480 + # pipe.enable_attention_slicing and float16 will cause black output on mps + # if device in ["cuda", "mps"] and use_fp16: + if device in ["cuda"] and use_fp16: + return use_gpu, torch.float16 + return use_gpu, torch.float32 + + +def enable_low_mem(pipe, enable: bool): + if torch.backends.mps.is_available(): + # https://huggingface.co/docs/diffusers/v0.25.0/en/api/pipelines/stable_diffusion/image_variation#diffusers.StableDiffusionImageVariationPipeline.enable_attention_slicing + # CUDA: Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. + if enable: + pipe.enable_attention_slicing("max") + else: + # https://huggingface.co/docs/diffusers/optimization/mps + # Devices with less than 64GB of memory are recommended to use enable_attention_slicing + pipe.enable_attention_slicing() + + if enable: + pipe.vae.enable_tiling() diff --git a/lama_cleaner/model/zits.py b/iopaint/model/zits.py similarity index 90% rename from lama_cleaner/model/zits.py rename to iopaint/model/zits.py index 664ca15..d58ac01 100644 --- a/lama_cleaner/model/zits.py +++ b/iopaint/model/zits.py @@ -5,11 +5,11 @@ import cv2 import torch import torch.nn.functional as F -from lama_cleaner.helper import get_cache_path_by_url, load_jit_model -from lama_cleaner.schema import Config +from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model +from iopaint.schema import InpaintRequest import numpy as np -from lama_cleaner.model.base import InpaintModel +from .base import InpaintModel ZITS_INPAINT_MODEL_URL = os.environ.get( "ZITS_INPAINT_MODEL_URL", @@ -171,14 +171,19 @@ def load_image(img, mask, device, sigma256=3.0): try: import skimage + gray_256 = skimage.color.rgb2gray(img_256) edge_256 = skimage.feature.canny(gray_256, sigma=3.0, mask=None).astype(float) # cv2.imwrite("skimage_gray.jpg", (gray_256*255).astype(np.uint8)) # cv2.imwrite("skimage_edge.jpg", (edge_256*255).astype(np.uint8)) except: gray_256 = cv2.cvtColor(img_256, cv2.COLOR_RGB2GRAY) - gray_256_blured = cv2.GaussianBlur(gray_256, ksize=(7, 7), sigmaX=sigma256, sigmaY=sigma256) - edge_256 = cv2.Canny(gray_256_blured, threshold1=int(255*0.1), threshold2=int(255*0.2)) + gray_256_blured = cv2.GaussianBlur( + gray_256, ksize=(7, 7), sigmaX=sigma256, sigmaY=sigma256 + ) + edge_256 = cv2.Canny( + gray_256_blured, threshold1=int(255 * 0.1), threshold2=int(255 * 0.2) + ) # cv2.imwrite("opencv_edge.jpg", edge_256) @@ -221,6 +226,7 @@ class ZITS(InpaintModel): min_size = 256 pad_mod = 32 pad_to_square = True + is_erase_model = True def __init__(self, device, **kwargs): """ @@ -233,12 +239,27 @@ class ZITS(InpaintModel): self.sample_edge_line_iterations = 1 def init_model(self, device, **kwargs): - self.wireframe = load_jit_model(ZITS_WIRE_FRAME_MODEL_URL, device, ZITS_WIRE_FRAME_MODEL_MD5) - self.edge_line = load_jit_model(ZITS_EDGE_LINE_MODEL_URL, device, ZITS_EDGE_LINE_MODEL_MD5) + self.wireframe = load_jit_model( + ZITS_WIRE_FRAME_MODEL_URL, device, ZITS_WIRE_FRAME_MODEL_MD5 + ) + self.edge_line = load_jit_model( + ZITS_EDGE_LINE_MODEL_URL, device, ZITS_EDGE_LINE_MODEL_MD5 + ) self.structure_upsample = load_jit_model( ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, device, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 ) - self.inpaint = 
load_jit_model(ZITS_INPAINT_MODEL_URL, device, ZITS_INPAINT_MODEL_MD5) + self.inpaint = load_jit_model( + ZITS_INPAINT_MODEL_URL, device, ZITS_INPAINT_MODEL_MD5 + ) + + @staticmethod + def download(): + download_model(ZITS_WIRE_FRAME_MODEL_URL, ZITS_WIRE_FRAME_MODEL_MD5) + download_model(ZITS_EDGE_LINE_MODEL_URL, ZITS_EDGE_LINE_MODEL_MD5) + download_model( + ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 + ) + download_model(ZITS_INPAINT_MODEL_URL, ZITS_INPAINT_MODEL_MD5) @staticmethod def is_downloaded() -> bool: @@ -322,7 +343,7 @@ class ZITS(InpaintModel): items["line"] = line_pred.detach() @torch.no_grad() - def forward(self, image, mask, config: Config): + def forward(self, image, mask, config: InpaintRequest): """Input images and output images have same size images: [H, W, C] RGB masks: [H, W] @@ -385,12 +406,20 @@ class ZITS(InpaintModel): if score > mask_th: try: import skimage + rr, cc, value = skimage.draw.line_aa( *to_int(line[0:2]), *to_int(line[2:4]) ) lmap[rr, cc] = np.maximum(lmap[rr, cc], value) except: - cv2.line(lmap, to_int(line[0:2][::-1]), to_int(line[2:4][::-1]), (1, 1, 1), 1, cv2.LINE_AA) + cv2.line( + lmap, + to_int(line[0:2][::-1]), + to_int(line[2:4][::-1]), + (1, 1, 1), + 1, + cv2.LINE_AA, + ) lmap = np.clip(lmap * 255, 0, 255).astype(np.uint8) lines_tensor.append(to_tensor(lmap).unsqueeze(0)) diff --git a/iopaint/model_info.py b/iopaint/model_info.py new file mode 100644 index 0000000..8021fa3 --- /dev/null +++ b/iopaint/model_info.py @@ -0,0 +1,103 @@ +from typing import List + +from pydantic import computed_field, BaseModel + +from iopaint.const import ( + SDXL_CONTROLNET_CHOICES, + SD2_CONTROLNET_CHOICES, + SD_CONTROLNET_CHOICES, + INSTRUCT_PIX2PIX_NAME, + KANDINSKY22_NAME, + POWERPAINT_NAME, + ANYTEXT_NAME, +) +from iopaint.schema import ModelType + + +class ModelInfo(BaseModel): + name: str + path: str + model_type: ModelType + is_single_file_diffusers: bool = False + + @computed_field + @property + def need_prompt(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [ + INSTRUCT_PIX2PIX_NAME, + KANDINSKY22_NAME, + POWERPAINT_NAME, + ANYTEXT_NAME, + ] + + @computed_field + @property + def controlnets(self) -> List[str]: + if self.model_type in [ + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SDXL_INPAINT, + ]: + return SDXL_CONTROLNET_CHOICES + if self.model_type in [ModelType.DIFFUSERS_SD, ModelType.DIFFUSERS_SD_INPAINT]: + if "sd2" in self.name.lower(): + return SD2_CONTROLNET_CHOICES + else: + return SD_CONTROLNET_CHOICES + if self.name == POWERPAINT_NAME: + return SD_CONTROLNET_CHOICES + return [] + + @computed_field + @property + def support_strength(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [POWERPAINT_NAME, ANYTEXT_NAME] + + @computed_field + @property + def support_outpainting(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [KANDINSKY22_NAME, POWERPAINT_NAME] + + @computed_field + @property + def support_lcm_lora(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] + + @computed_field + 
@property + def support_controlnet(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] + + @computed_field + @property + def support_freeu(self) -> bool: + return self.model_type in [ + ModelType.DIFFUSERS_SD, + ModelType.DIFFUSERS_SDXL, + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SDXL_INPAINT, + ] or self.name in [INSTRUCT_PIX2PIX_NAME] diff --git a/iopaint/model_manager.py b/iopaint/model_manager.py new file mode 100644 index 0000000..a5ad87d --- /dev/null +++ b/iopaint/model_manager.py @@ -0,0 +1,192 @@ +from typing import List, Dict + +import torch +from loguru import logger +import numpy as np + +from iopaint.download import scan_models +from iopaint.helper import switch_mps_device +from iopaint.model import models, ControlNet, SD, SDXL +from iopaint.model.utils import torch_gc, is_local_files_only +from iopaint.model_info import ModelInfo, ModelType +from iopaint.schema import InpaintRequest + + +class ModelManager: + def __init__(self, name: str, device: torch.device, **kwargs): + self.name = name + self.device = device + self.kwargs = kwargs + self.available_models: Dict[str, ModelInfo] = {} + self.scan_models() + + self.enable_controlnet = kwargs.get("enable_controlnet", False) + controlnet_method = kwargs.get("controlnet_method", None) + if ( + controlnet_method is None + and name in self.available_models + and self.available_models[name].support_controlnet + ): + controlnet_method = self.available_models[name].controlnets[0] + self.controlnet_method = controlnet_method + self.model = self.init_model(name, device, **kwargs) + + @property + def current_model(self) -> ModelInfo: + return self.available_models[self.name] + + def init_model(self, name: str, device, **kwargs): + logger.info(f"Loading model: {name}") + if name not in self.available_models: + raise NotImplementedError( + f"Unsupported model: {name}. 
Available models: {list(self.available_models.keys())}" + ) + + model_info = self.available_models[name] + kwargs = { + **kwargs, + "model_info": model_info, + "enable_controlnet": self.enable_controlnet, + "controlnet_method": self.controlnet_method, + } + + if model_info.support_controlnet and self.enable_controlnet: + return ControlNet(device, **kwargs) + elif model_info.name in models: + return models[name](device, **kwargs) + else: + if model_info.model_type in [ + ModelType.DIFFUSERS_SD_INPAINT, + ModelType.DIFFUSERS_SD, + ]: + return SD(device, **kwargs) + + if model_info.model_type in [ + ModelType.DIFFUSERS_SDXL_INPAINT, + ModelType.DIFFUSERS_SDXL, + ]: + return SDXL(device, **kwargs) + + raise NotImplementedError(f"Unsupported model: {name}") + + @torch.inference_mode() + def __call__(self, image, mask, config: InpaintRequest): + """ + + Args: + image: [H, W, C] RGB + mask: [H, W, 1] 255 means area to repaint + config: + + Returns: + BGR image + """ + self.switch_controlnet_method(config) + self.enable_disable_freeu(config) + self.enable_disable_lcm_lora(config) + return self.model(image, mask, config).astype(np.uint8) + + def scan_models(self) -> List[ModelInfo]: + available_models = scan_models() + self.available_models = {it.name: it for it in available_models} + return available_models + + def switch(self, new_name: str): + if new_name == self.name: + return + + old_name = self.name + old_controlnet_method = self.controlnet_method + self.name = new_name + + if ( + self.available_models[new_name].support_controlnet + and self.controlnet_method + not in self.available_models[new_name].controlnets + ): + self.controlnet_method = self.available_models[new_name].controlnets[0] + try: + # TODO: enable/disable controlnet without reload model + del self.model + torch_gc() + + self.model = self.init_model( + new_name, switch_mps_device(new_name, self.device), **self.kwargs + ) + except Exception as e: + self.name = old_name + self.controlnet_method = old_controlnet_method + logger.info(f"Switch model from {old_name} to {new_name} failed, rollback") + self.model = self.init_model( + old_name, switch_mps_device(old_name, self.device), **self.kwargs + ) + raise e + + def switch_controlnet_method(self, config): + if not self.available_models[self.name].support_controlnet: + return + + if ( + self.enable_controlnet + and config.controlnet_method + and self.controlnet_method != config.controlnet_method + ): + old_controlnet_method = self.controlnet_method + self.controlnet_method = config.controlnet_method + self.model.switch_controlnet_method(config.controlnet_method) + logger.info( + f"Switch Controlnet method from {old_controlnet_method} to {config.controlnet_method}" + ) + elif self.enable_controlnet != config.enable_controlnet: + self.enable_controlnet = config.enable_controlnet + self.controlnet_method = config.controlnet_method + + pipe_components = { + "vae": self.model.model.vae, + "text_encoder": self.model.model.text_encoder, + "unet": self.model.model.unet, + } + if hasattr(self.model.model, "text_encoder_2"): + pipe_components["text_encoder_2"] = self.model.model.text_encoder_2 + + self.model = self.init_model( + self.name, + switch_mps_device(self.name, self.device), + pipe_components=pipe_components, + **self.kwargs, + ) + if not config.enable_controlnet: + logger.info(f"Disable controlnet") + else: + logger.info(f"Enable controlnet: {config.controlnet_method}") + + def enable_disable_freeu(self, config: InpaintRequest): + if str(self.model.device) == "mps": + return + + if 
self.available_models[self.name].support_freeu: + if config.sd_freeu: + freeu_config = config.sd_freeu_config + self.model.model.enable_freeu( + s1=freeu_config.s1, + s2=freeu_config.s2, + b1=freeu_config.b1, + b2=freeu_config.b2, + ) + else: + self.model.model.disable_freeu() + + def enable_disable_lcm_lora(self, config: InpaintRequest): + if self.available_models[self.name].support_lcm_lora: + # TODO: change this if load other lora is supported + lcm_lora_loaded = bool(self.model.model.get_list_adapters()) + if config.sd_lcm_lora: + if not lcm_lora_loaded: + self.model.model.load_lora_weights( + self.model.lcm_lora_id, + weight_name="pytorch_lora_weights.safetensors", + local_files_only=is_local_files_only(), + ) + else: + if lcm_lora_loaded: + self.model.model.disable_lora() diff --git a/iopaint/plugins/__init__.py b/iopaint/plugins/__init__.py new file mode 100644 index 0000000..3e7d5cf --- /dev/null +++ b/iopaint/plugins/__init__.py @@ -0,0 +1,73 @@ +from typing import Dict + +from loguru import logger + +from .anime_seg import AnimeSeg +from .gfpgan_plugin import GFPGANPlugin +from .interactive_seg import InteractiveSeg +from .realesrgan import RealESRGANUpscaler +from .remove_bg import RemoveBG +from .restoreformer import RestoreFormerPlugin +from ..schema import InteractiveSegModel, Device, RealESRGANModel + + +def build_plugins( + enable_interactive_seg: bool, + interactive_seg_model: InteractiveSegModel, + interactive_seg_device: Device, + enable_remove_bg: bool, + enable_anime_seg: bool, + enable_realesrgan: bool, + realesrgan_device: Device, + realesrgan_model: RealESRGANModel, + enable_gfpgan: bool, + gfpgan_device: Device, + enable_restoreformer: bool, + restoreformer_device: Device, + no_half: bool, +) -> Dict: + plugins = {} + if enable_interactive_seg: + logger.info(f"Initialize {InteractiveSeg.name} plugin") + plugins[InteractiveSeg.name] = InteractiveSeg( + interactive_seg_model, interactive_seg_device + ) + + if enable_remove_bg: + logger.info(f"Initialize {RemoveBG.name} plugin") + plugins[RemoveBG.name] = RemoveBG() + + if enable_anime_seg: + logger.info(f"Initialize {AnimeSeg.name} plugin") + plugins[AnimeSeg.name] = AnimeSeg() + + if enable_realesrgan: + logger.info( + f"Initialize {RealESRGANUpscaler.name} plugin: {realesrgan_model}, {realesrgan_device}" + ) + plugins[RealESRGANUpscaler.name] = RealESRGANUpscaler( + realesrgan_model, + realesrgan_device, + no_half=no_half, + ) + + if enable_gfpgan: + logger.info(f"Initialize {GFPGANPlugin.name} plugin") + if enable_realesrgan: + logger.info("Use realesrgan as GFPGAN background upscaler") + else: + logger.info( + f"GFPGAN no background upscaler, use --enable-realesrgan to enable it" + ) + plugins[GFPGANPlugin.name] = GFPGANPlugin( + gfpgan_device, + upscaler=plugins.get(RealESRGANUpscaler.name, None), + ) + + if enable_restoreformer: + logger.info(f"Initialize {RestoreFormerPlugin.name} plugin") + plugins[RestoreFormerPlugin.name] = RestoreFormerPlugin( + restoreformer_device, + upscaler=plugins.get(RealESRGANUpscaler.name, None), + ) + return plugins diff --git a/lama_cleaner/plugins/anime_seg.py b/iopaint/plugins/anime_seg.py similarity index 95% rename from lama_cleaner/plugins/anime_seg.py rename to iopaint/plugins/anime_seg.py index ecfc7d1..286564b 100644 --- a/lama_cleaner/plugins/anime_seg.py +++ b/iopaint/plugins/anime_seg.py @@ -5,8 +5,9 @@ import torch.nn.functional as F import numpy as np from PIL import Image -from lama_cleaner.helper import load_model -from lama_cleaner.plugins.base_plugin import 
BasePlugin +from iopaint.helper import load_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest class REBNCONV(nn.Module): @@ -415,6 +416,8 @@ ANIME_SEG_MODELS = { class AnimeSeg(BasePlugin): # Model from: https://github.com/SkyTNT/anime-segmentation name = "AnimeSeg" + support_gen_image = True + support_gen_mask = True def __init__(self): super().__init__() @@ -425,10 +428,19 @@ class AnimeSeg(BasePlugin): ANIME_SEG_MODELS["md5"], ) - def __call__(self, rgb_np_img, files, form): + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + mask = self.forward(rgb_np_img) + mask = Image.fromarray(mask, mode="L") + h0, w0 = rgb_np_img.shape[0], rgb_np_img.shape[1] + empty = Image.new("RGBA", (w0, h0), 0) + img = Image.fromarray(rgb_np_img) + cutout = Image.composite(img, empty, mask) + return np.asarray(cutout) + + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: return self.forward(rgb_np_img) - @torch.no_grad() + @torch.inference_mode() def forward(self, rgb_np_img): s = 1024 @@ -447,9 +459,4 @@ class AnimeSeg(BasePlugin): mask = self.model(tmpImg) mask = mask[0, :, ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w] mask = cv2.resize(mask.cpu().numpy().transpose((1, 2, 0)), (w0, h0)) - mask = Image.fromarray((mask * 255).astype("uint8"), mode="L") - - empty = Image.new("RGBA", (w0, h0), 0) - img = Image.fromarray(rgb_np_img) - cutout = Image.composite(img, empty, mask) - return np.asarray(cutout) + return (mask * 255).astype("uint8") diff --git a/iopaint/plugins/base_plugin.py b/iopaint/plugins/base_plugin.py new file mode 100644 index 0000000..13dfdad --- /dev/null +++ b/iopaint/plugins/base_plugin.py @@ -0,0 +1,27 @@ +from loguru import logger +import numpy as np + +from iopaint.schema import RunPluginRequest + + +class BasePlugin: + name: str + support_gen_image: bool = False + support_gen_mask: bool = False + + def __init__(self): + err_msg = self.check_dep() + if err_msg: + logger.error(err_msg) + exit(-1) + + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + # return RGBA np image or BGR np image + ... + + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + # return GRAY or BGR np image, 255 means foreground, 0 means background + ... + + def check_dep(self): + ... 
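
For context on the new plugin contract introduced in `iopaint/plugins/base_plugin.py` above — plugins now implement `gen_image` / `gen_mask` against a typed `RunPluginRequest` and declare `support_gen_image` / `support_gen_mask`, replacing the old `__call__(rgb_np_img, files, form)` entry point — the following is a minimal, hypothetical sketch of a custom plugin. It is illustrative only and not part of this patch; the `GrayscalePlugin` name and its behaviour are assumptions, while `BasePlugin` and `RunPluginRequest` are the classes added in the diff.

```python
# Hypothetical sketch (not part of this patch): a minimal plugin written against
# the new BasePlugin interface added above. "GrayscalePlugin" and its behaviour
# are illustrative assumptions, not an IOPaint feature.
import cv2
import numpy as np

from iopaint.plugins.base_plugin import BasePlugin
from iopaint.schema import RunPluginRequest


class GrayscalePlugin(BasePlugin):
    name = "Grayscale"
    support_gen_image = True   # serves results through gen_image
    support_gen_mask = False   # produces no mask

    def gen_image(self, rgb_np_img: np.ndarray, req: RunPluginRequest) -> np.ndarray:
        # Input is an RGB numpy array; per BasePlugin, gen_image may return
        # an RGBA or BGR numpy image.
        gray = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2GRAY)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)

    def check_dep(self):
        # Return an error message string when a required dependency is missing;
        # BasePlugin.__init__ logs it and exits. None means all dependencies are met.
        return None
```

As the surrounding diff shows for AnimeSeg, GFPGAN, RemoveBG and the others, splitting the old `__call__` into `gen_image` / `gen_mask` plus capability flags lets the server know up front what each plugin can produce, instead of parsing ad-hoc form fields at request time.
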
diff --git a/lama_cleaner/plugins/gfpgan_plugin.py b/iopaint/plugins/gfpgan_plugin.py similarity index 89% rename from lama_cleaner/plugins/gfpgan_plugin.py rename to iopaint/plugins/gfpgan_plugin.py index 2422094..619280b 100644 --- a/lama_cleaner/plugins/gfpgan_plugin.py +++ b/iopaint/plugins/gfpgan_plugin.py @@ -1,12 +1,15 @@ import cv2 +import numpy as np from loguru import logger -from lama_cleaner.helper import download_model -from lama_cleaner.plugins.base_plugin import BasePlugin +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest class GFPGANPlugin(BasePlugin): name = "GFPGAN" + support_gen_image = True def __init__(self, device, upscaler=None): super().__init__() @@ -36,7 +39,7 @@ class GFPGANPlugin(BasePlugin): self.face_enhancer.face_helper.face_det.to(device) ) - def __call__(self, rgb_np_img, files, form): + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: weight = 0.5 bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) logger.info(f"GFPGAN input shape: {bgr_np_img.shape}") diff --git a/lama_cleaner/plugins/gfpganer.py b/iopaint/plugins/gfpganer.py similarity index 100% rename from lama_cleaner/plugins/gfpganer.py rename to iopaint/plugins/gfpganer.py diff --git a/lama_cleaner/plugins/interactive_seg.py b/iopaint/plugins/interactive_seg.py similarity index 66% rename from lama_cleaner/plugins/interactive_seg.py rename to iopaint/plugins/interactive_seg.py index 9cb5371..7c03ba5 100644 --- a/lama_cleaner/plugins/interactive_seg.py +++ b/iopaint/plugins/interactive_seg.py @@ -1,12 +1,16 @@ +import hashlib import json +from typing import List import cv2 import numpy as np +import torch from loguru import logger -from lama_cleaner.helper import download_model -from lama_cleaner.plugins.base_plugin import BasePlugin -from lama_cleaner.plugins.segment_anything import SamPredictor, sam_model_registry +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.plugins.segment_anything import SamPredictor, sam_model_registry +from iopaint.schema import RunPluginRequest # 从小到大 SEGMENT_ANYTHING_MODELS = { @@ -22,11 +26,16 @@ SEGMENT_ANYTHING_MODELS = { "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", "md5": "4b8939a88964f0f4ff5f5b2642c598a6", }, + "mobile_sam": { + "url": "https://github.com/Sanster/models/releases/download/MobileSAM/mobile_sam.pt", + "md5": "f3c0d8cda613564d499310dab6c812cd", + }, } class InteractiveSeg(BasePlugin): name = "InteractiveSeg" + support_gen_mask = True def __init__(self, model_name, device): super().__init__() @@ -40,11 +49,12 @@ class InteractiveSeg(BasePlugin): ) self.prev_img_md5 = None - def __call__(self, rgb_np_img, files, form): - clicks = json.loads(form["clicks"]) - return self.forward(rgb_np_img, clicks, form["img_md5"]) + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + img_md5 = hashlib.md5(req.image.encode("utf-8")).hexdigest() + return self.forward(rgb_np_img, req.clicks, img_md5) - def forward(self, rgb_np_img, clicks, img_md5): + @torch.inference_mode() + def forward(self, rgb_np_img, clicks: List[List], img_md5: str): input_point = [] input_label = [] for click in clicks: @@ -63,13 +73,4 @@ class InteractiveSeg(BasePlugin): multimask_output=False, ) mask = masks[0].astype(np.uint8) * 255 - # TODO: how to set kernel size? 
- kernel_size = 9 - mask = cv2.dilate( - mask, np.ones((kernel_size, kernel_size), np.uint8), iterations=1 - ) - # fronted brush color "ffcc00bb" - res_mask = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8) - res_mask[mask == 255] = [255, 203, 0, int(255 * 0.73)] - res_mask = cv2.cvtColor(res_mask, cv2.COLOR_BGRA2RGBA) - return res_mask + return mask diff --git a/lama_cleaner/plugins/realesrgan.py b/iopaint/plugins/realesrgan.py similarity index 84% rename from lama_cleaner/plugins/realesrgan.py rename to iopaint/plugins/realesrgan.py index 6dcf761..8165fa3 100644 --- a/lama_cleaner/plugins/realesrgan.py +++ b/iopaint/plugins/realesrgan.py @@ -1,15 +1,16 @@ -from enum import Enum - import cv2 +import numpy as np +import torch from loguru import logger -from lama_cleaner.const import RealESRGANModelName -from lama_cleaner.helper import download_model -from lama_cleaner.plugins.base_plugin import BasePlugin +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest, RealESRGANModel class RealESRGANUpscaler(BasePlugin): name = "RealESRGAN" + support_gen_image = True def __init__(self, name, device, no_half=False): super().__init__() @@ -18,7 +19,7 @@ class RealESRGANUpscaler(BasePlugin): from realesrgan.archs.srvgg_arch import SRVGGNetCompact REAL_ESRGAN_MODELS = { - RealESRGANModelName.realesr_general_x4v3: { + RealESRGANModel.realesr_general_x4v3: { "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth", "scale": 4, "model": lambda: SRVGGNetCompact( @@ -31,7 +32,7 @@ class RealESRGANUpscaler(BasePlugin): ), "model_md5": "91a7644643c884ee00737db24e478156", }, - RealESRGANModelName.RealESRGAN_x4plus: { + RealESRGANModel.RealESRGAN_x4plus: { "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth", "scale": 4, "model": lambda: RRDBNet( @@ -44,7 +45,7 @@ class RealESRGANUpscaler(BasePlugin): ), "model_md5": "99ec365d4afad750833258a1a24f44ca", }, - RealESRGANModelName.RealESRGAN_x4plus_anime_6B: { + RealESRGANModel.RealESRGAN_x4plus_anime_6B: { "url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth", "scale": 4, "model": lambda: RRDBNet( @@ -76,14 +77,14 @@ class RealESRGANUpscaler(BasePlugin): device=device, ) - def __call__(self, rgb_np_img, files, form): + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) - scale = float(form["upscale"]) - logger.info(f"RealESRGAN input shape: {bgr_np_img.shape}, scale: {scale}") - result = self.forward(bgr_np_img, scale) + logger.info(f"RealESRGAN input shape: {bgr_np_img.shape}, scale: {req.scale}") + result = self.forward(bgr_np_img, req.scale) logger.info(f"RealESRGAN output shape: {result.shape}") return result + @torch.inference_mode() def forward(self, bgr_np_img, scale: float): # 输出是 BGR upsampled = self.model.enhance(bgr_np_img, outscale=scale)[0] diff --git a/lama_cleaner/plugins/remove_bg.py b/iopaint/plugins/remove_bg.py similarity index 60% rename from lama_cleaner/plugins/remove_bg.py rename to iopaint/plugins/remove_bg.py index 4025198..55de64f 100644 --- a/lama_cleaner/plugins/remove_bg.py +++ b/iopaint/plugins/remove_bg.py @@ -3,11 +3,14 @@ import cv2 import numpy as np from torch.hub import get_dir -from lama_cleaner.plugins.base_plugin import BasePlugin +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import 
RunPluginRequest class RemoveBG(BasePlugin): name = "RemoveBG" + support_gen_mask = True + support_gen_image = True def __init__(self): super().__init__() @@ -19,17 +22,24 @@ class RemoveBG(BasePlugin): self.session = new_session(model_name="u2net") - def __call__(self, rgb_np_img, files, form): - bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) - return self.forward(bgr_np_img) - - def forward(self, bgr_np_img) -> np.ndarray: + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: from rembg import remove + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + # return BGRA image output = remove(bgr_np_img, session=self.session) return cv2.cvtColor(output, cv2.COLOR_BGRA2RGBA) + def gen_mask(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: + from rembg import remove + + bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) + + # return BGR image, 255 means foreground, 0 means background + output = remove(bgr_np_img, session=self.session, only_mask=True) + return output + def check_dep(self): try: import rembg diff --git a/lama_cleaner/plugins/restoreformer.py b/iopaint/plugins/restoreformer.py similarity index 86% rename from lama_cleaner/plugins/restoreformer.py rename to iopaint/plugins/restoreformer.py index 0cd8b10..4e1d3e7 100644 --- a/lama_cleaner/plugins/restoreformer.py +++ b/iopaint/plugins/restoreformer.py @@ -1,12 +1,15 @@ import cv2 +import numpy as np from loguru import logger -from lama_cleaner.helper import download_model -from lama_cleaner.plugins.base_plugin import BasePlugin +from iopaint.helper import download_model +from iopaint.plugins.base_plugin import BasePlugin +from iopaint.schema import RunPluginRequest class RestoreFormerPlugin(BasePlugin): name = "RestoreFormer" + support_gen_image = True def __init__(self, device, upscaler=None): super().__init__() @@ -31,7 +34,7 @@ class RestoreFormerPlugin(BasePlugin): bg_upsampler=upscaler.model if upscaler is not None else None, ) - def __call__(self, rgb_np_img, files, form): + def gen_image(self, rgb_np_img, req: RunPluginRequest) -> np.ndarray: weight = 0.5 bgr_np_img = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR) logger.info(f"RestoreFormer input shape: {bgr_np_img.shape}") diff --git a/lama_cleaner/plugins/segment_anything/__init__.py b/iopaint/plugins/segment_anything/__init__.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/__init__.py rename to iopaint/plugins/segment_anything/__init__.py diff --git a/lama_cleaner/plugins/segment_anything/build_sam.py b/iopaint/plugins/segment_anything/build_sam.py similarity index 60% rename from lama_cleaner/plugins/segment_anything/build_sam.py rename to iopaint/plugins/segment_anything/build_sam.py index 07abfca..f8dea8e 100644 --- a/lama_cleaner/plugins/segment_anything/build_sam.py +++ b/iopaint/plugins/segment_anything/build_sam.py @@ -8,7 +8,15 @@ import torch from functools import partial -from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer +from iopaint.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT + +from .modeling import ( + ImageEncoderViT, + MaskDecoder, + PromptEncoder, + Sam, + TwoWayTransformer, +) def build_sam_vit_h(checkpoint=None): @@ -44,11 +52,64 @@ def build_sam_vit_b(checkpoint=None): ) +def build_sam_vit_t(checkpoint=None): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + mobile_sam = Sam( + image_encoder=TinyViT( + img_size=1024, + in_chans=3, + num_classes=1000, + 
embed_dims=[64, 128, 160, 320], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 5, 10], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4.0, + drop_rate=0.0, + drop_path_rate=0.0, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=0.8, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + + mobile_sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + mobile_sam.load_state_dict(state_dict) + return mobile_sam + + sam_model_registry = { "default": build_sam, "vit_h": build_sam, "vit_l": build_sam_vit_l, "vit_b": build_sam_vit_b, + "mobile_sam": build_sam_vit_t, } diff --git a/lama_cleaner/plugins/segment_anything/modeling/__init__.py b/iopaint/plugins/segment_anything/modeling/__init__.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/__init__.py rename to iopaint/plugins/segment_anything/modeling/__init__.py diff --git a/lama_cleaner/plugins/segment_anything/modeling/common.py b/iopaint/plugins/segment_anything/modeling/common.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/common.py rename to iopaint/plugins/segment_anything/modeling/common.py diff --git a/lama_cleaner/plugins/segment_anything/modeling/image_encoder.py b/iopaint/plugins/segment_anything/modeling/image_encoder.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/image_encoder.py rename to iopaint/plugins/segment_anything/modeling/image_encoder.py diff --git a/lama_cleaner/plugins/segment_anything/modeling/mask_decoder.py b/iopaint/plugins/segment_anything/modeling/mask_decoder.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/mask_decoder.py rename to iopaint/plugins/segment_anything/modeling/mask_decoder.py diff --git a/lama_cleaner/plugins/segment_anything/modeling/prompt_encoder.py b/iopaint/plugins/segment_anything/modeling/prompt_encoder.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/prompt_encoder.py rename to iopaint/plugins/segment_anything/modeling/prompt_encoder.py diff --git a/lama_cleaner/plugins/segment_anything/modeling/sam.py b/iopaint/plugins/segment_anything/modeling/sam.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/sam.py rename to iopaint/plugins/segment_anything/modeling/sam.py diff --git a/iopaint/plugins/segment_anything/modeling/tiny_vit_sam.py b/iopaint/plugins/segment_anything/modeling/tiny_vit_sam.py new file mode 100644 index 0000000..a5127c7 --- /dev/null +++ b/iopaint/plugins/segment_anything/modeling/tiny_vit_sam.py @@ -0,0 +1,822 @@ +# -------------------------------------------------------- +# TinyViT Model Architecture +# Copyright (c) 2022 Microsoft +# Adapted from LeViT and Swin Transformer +# LeViT: (https://github.com/facebookresearch/levit) +# Swin: (https://github.com/microsoft/swin-transformer) +# Build the TinyViT Model +# -------------------------------------------------------- + +import collections +import itertools 
+import math +import warnings +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from typing import Tuple + + +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return x + return tuple(itertools.repeat(x, n)) + + return parse + + +to_2tuple = _ntuple(2) + + +def _trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2, + ) + + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are + applied while sampling the normal with mean/std applied, therefore a, b args + should be adjusted to match the range of mean, std args. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + with torch.no_grad(): + return _trunc_normal_(tensor, mean, std, a, b) + + +def drop_path( + x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True +): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. 
+ + """ + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * ( + x.ndim - 1 + ) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class TimmDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True): + super(TimmDropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f"drop_prob={round(self.drop_prob,3):0.3f}" + + +class Conv2d_BN(torch.nn.Sequential): + def __init__( + self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1 + ): + super().__init__() + self.add_module( + "c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False) + ) + bn = torch.nn.BatchNorm2d(b) + torch.nn.init.constant_(bn.weight, bn_weight_init) + torch.nn.init.constant_(bn.bias, 0) + self.add_module("bn", bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = torch.nn.Conv2d( + w.size(1) * self.c.groups, + w.size(0), + w.shape[2:], + stride=self.c.stride, + padding=self.c.padding, + dilation=self.c.dilation, + groups=self.c.groups, + ) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class DropPath(TimmDropPath): + def __init__(self, drop_prob=None): + super().__init__(drop_prob=drop_prob) + self.drop_prob = drop_prob + + def __repr__(self): + msg = super().__repr__() + msg += f"(drop_prob={self.drop_prob})" + return msg + + +class PatchEmbed(nn.Module): + def __init__(self, in_chans, embed_dim, resolution, activation): + super().__init__() + img_size: Tuple[int, int] = to_2tuple(resolution) + self.patches_resolution = (img_size[0] // 4, img_size[1] // 4) + self.num_patches = self.patches_resolution[0] * self.patches_resolution[1] + self.in_chans = in_chans + self.embed_dim = embed_dim + n = embed_dim + self.seq = nn.Sequential( + Conv2d_BN(in_chans, n // 2, 3, 2, 1), + activation(), + Conv2d_BN(n // 2, n, 3, 2, 1), + ) + + def forward(self, x): + return self.seq(x) + + +class MBConv(nn.Module): + def __init__(self, in_chans, out_chans, expand_ratio, activation, drop_path): + super().__init__() + self.in_chans = in_chans + self.hidden_chans = int(in_chans * expand_ratio) + self.out_chans = out_chans + + self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1) + self.act1 = activation() + + self.conv2 = Conv2d_BN( + self.hidden_chans, + self.hidden_chans, + ks=3, + stride=1, + pad=1, + groups=self.hidden_chans, + ) + self.act2 = activation() + + self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0) + self.act3 = activation() + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.act2(x) + + x = self.conv3(x) + + x = self.drop_path(x) + + x += shortcut + x = self.act3(x) + + return x + + +class PatchMerging(nn.Module): + def __init__(self, input_resolution, dim, out_dim, activation): + 
super().__init__() + + self.input_resolution = input_resolution + self.dim = dim + self.out_dim = out_dim + self.act = activation() + self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0) + stride_c = 2 + if out_dim == 320 or out_dim == 448 or out_dim == 576: + stride_c = 1 + self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim) + self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0) + + def forward(self, x): + if x.ndim == 3: + H, W = self.input_resolution + B = len(x) + # (B, C, H, W) + x = x.view(B, H, W, -1).permute(0, 3, 1, 2) + + x = self.conv1(x) + x = self.act(x) + + x = self.conv2(x) + x = self.act(x) + x = self.conv3(x) + x = x.flatten(2).transpose(1, 2) + return x + + +class ConvLayer(nn.Module): + def __init__( + self, + dim, + input_resolution, + depth, + activation, + drop_path=0.0, + downsample=None, + use_checkpoint=False, + out_dim=None, + conv_expand_ratio=4.0, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + MBConv( + dim, + dim, + conv_expand_ratio, + activation, + drop_path[i] if isinstance(drop_path, list) else drop_path, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, out_dim=out_dim, activation=activation + ) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.norm = nn.LayerNorm(in_features) + self.fc1 = nn.Linear(in_features, hidden_features) + self.fc2 = nn.Linear(hidden_features, out_features) + self.act = act_layer() + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.norm(x) + + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(torch.nn.Module): + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=(14, 14), + ): + super().__init__() + # (h, w) + assert isinstance(resolution, tuple) and len(resolution) == 2 + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + h = self.dh + nh_kd * 2 + + self.norm = nn.LayerNorm(dim) + self.qkv = nn.Linear(dim, h) + self.proj = nn.Linear(self.dh, dim) + + points = list(itertools.product(range(resolution[0]), range(resolution[1]))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter( + torch.zeros(num_heads, len(attention_offsets)) + ) + self.register_buffer( + "attention_bias_idxs", torch.LongTensor(idxs).view(N, N), persistent=False + ) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and hasattr(self, "ab"): + 
del self.ab + else: + self.register_buffer( + "ab", + self.attention_biases[:, self.attention_bias_idxs], + persistent=False, + ) + + def forward(self, x): # x (B,N,C) + B, N, _ = x.shape + + # Normalization + x = self.norm(x) + + qkv = self.qkv(x) + # (B, N, num_heads, d) + q, k, v = qkv.view(B, N, self.num_heads, -1).split( + [self.key_dim, self.key_dim, self.d], dim=3 + ) + # (B, num_heads, N, d) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + ( + self.attention_biases[:, self.attention_bias_idxs] + if self.training + else self.ab + ) + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class TinyViTBlock(nn.Module): + r"""TinyViT Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int, int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + local_conv_size (int): the kernel size of the convolution between + Attention and MLP. Default: 3 + activation: the activation function. Default: nn.GELU + """ + + def __init__( + self, + dim, + input_resolution, + num_heads, + window_size=7, + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, + local_conv_size=3, + activation=nn.GELU, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + assert window_size > 0, "window_size must be greater than 0" + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + assert dim % num_heads == 0, "dim must be divisible by num_heads" + head_dim = dim // num_heads + + window_resolution = (window_size, window_size) + self.attn = Attention( + dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution + ) + + mlp_hidden_dim = int(dim * mlp_ratio) + mlp_activation = activation + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=mlp_activation, + drop=drop, + ) + + pad = local_conv_size // 2 + self.local_conv = Conv2d_BN( + dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim + ) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + res_x = x + if H == self.window_size and W == self.window_size: + x = self.attn(x) + else: + x = x.view(B, H, W, C) + pad_b = (self.window_size - H % self.window_size) % self.window_size + pad_r = (self.window_size - W % self.window_size) % self.window_size + padding = pad_b > 0 or pad_r > 0 + + if padding: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_size + nW = pW // self.window_size + # window partition + x = ( + x.view(B, nH, self.window_size, nW, self.window_size, C) + .transpose(2, 3) + .reshape(B * nH * nW, self.window_size * self.window_size, C) + ) + x = self.attn(x) + # window reverse + x = ( + x.view(B, nH, nW, self.window_size, self.window_size, C) + .transpose(2, 3) + .reshape(B, pH, pW, C) + ) + + if padding: + x = x[:, :H, :W].contiguous() + + x = x.view(B, L, C) + + x = res_x + self.drop_path(x) + + x = x.transpose(1, 2).reshape(B, C, H, W) + x = self.local_conv(x) + x = x.view(B, C, L).transpose(1, 2) + + x = x + self.drop_path(self.mlp(x)) + 
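
# A minimal, self-contained round trip of the window partition / window reverse
# reshapes used in TinyViTBlock.forward above; the tensor sizes are illustrative.
import torch

def window_roundtrip(B=2, H=14, W=14, C=8, ws=7):
    x = torch.randn(B, H, W, C)
    nH, nW = H // ws, W // ws
    # partition: (B, H, W, C) -> (B * nH * nW, ws * ws, C), one row per local window
    windows = (
        x.view(B, nH, ws, nW, ws, C)
        .transpose(2, 3)
        .reshape(B * nH * nW, ws * ws, C)
    )
    # reverse: undo the partition and recover the original layout exactly
    restored = (
        windows.view(B, nH, nW, ws, ws, C)
        .transpose(2, 3)
        .reshape(B, H, W, C)
    )
    assert torch.equal(x, restored)
    return windows.shape

print(window_roundtrip())  # torch.Size([8, 49, 8])
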
return x + + def extra_repr(self) -> str: + return ( + f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " + f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}" + ) + + +class BasicLayer(nn.Module): + """A basic TinyViT layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3 + activation: the activation function. Default: nn.GELU + out_dim: the output dimension of the layer. Default: dim + """ + + def __init__( + self, + dim, + input_resolution, + depth, + num_heads, + window_size, + mlp_ratio=4.0, + drop=0.0, + drop_path=0.0, + downsample=None, + use_checkpoint=False, + local_conv_size=3, + activation=nn.GELU, + out_dim=None, + ): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + TinyViTBlock( + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) + else drop_path, + local_conv_size=local_conv_size, + activation=activation, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, out_dim=out_dim, activation=activation + ) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class TinyViT(nn.Module): + def __init__( + self, + img_size=224, + in_chans=3, + num_classes=1000, + embed_dims=[96, 192, 384, 768], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4.0, + drop_rate=0.0, + drop_path_rate=0.1, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=1.0, + ): + super().__init__() + self.img_size = img_size + self.num_classes = num_classes + self.depths = depths + self.num_layers = len(depths) + self.mlp_ratio = mlp_ratio + + activation = nn.GELU + + self.patch_embed = PatchEmbed( + in_chans=in_chans, + embed_dim=embed_dims[0], + 
resolution=img_size, + activation=activation, + ) + + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # stochastic depth + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + kwargs = dict( + dim=embed_dims[i_layer], + input_resolution=( + patches_resolution[0] + // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + patches_resolution[1] + // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)), + ), + # input_resolution=(patches_resolution[0] // (2 ** i_layer), + # patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)], + activation=activation, + ) + if i_layer == 0: + layer = ConvLayer( + conv_expand_ratio=mbconv_expand_ratio, + **kwargs, + ) + else: + layer = BasicLayer( + num_heads=num_heads[i_layer], + window_size=window_sizes[i_layer], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + local_conv_size=local_conv_size, + **kwargs, + ) + self.layers.append(layer) + + # Classifier head + self.norm_head = nn.LayerNorm(embed_dims[-1]) + self.head = ( + nn.Linear(embed_dims[-1], num_classes) + if num_classes > 0 + else torch.nn.Identity() + ) + + # init weights + self.apply(self._init_weights) + self.set_layer_lr_decay(layer_lr_decay) + self.neck = nn.Sequential( + nn.Conv2d( + embed_dims[-1], + 256, + kernel_size=1, + bias=False, + ), + LayerNorm2d(256), + nn.Conv2d( + 256, + 256, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(256), + ) + + def set_layer_lr_decay(self, layer_lr_decay): + decay_rate = layer_lr_decay + + # layers -> blocks (depth) + depth = sum(self.depths) + lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)] + # print("LR SCALES:", lr_scales) + + def _set_lr_scale(m, scale): + for p in m.parameters(): + p.lr_scale = scale + + self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0])) + i = 0 + for layer in self.layers: + for block in layer.blocks: + block.apply(lambda x: _set_lr_scale(x, lr_scales[i])) + i += 1 + if layer.downsample is not None: + layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1])) + assert i == depth + for m in [self.norm_head, self.head]: + m.apply(lambda x: _set_lr_scale(x, lr_scales[-1])) + + for k, p in self.named_parameters(): + p.param_name = k + + def _check_lr_scale(m): + for p in m.parameters(): + assert hasattr(p, "lr_scale"), p.param_name + + self.apply(_check_lr_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {"attention_biases"} + + def forward_features(self, x): + # x: (N, C, H, W) + x = self.patch_embed(x) + + x = self.layers[0](x) + start_i = 1 + + for i in range(start_i, len(self.layers)): + layer = self.layers[i] + x = layer(x) + B, _, C = x.size() + x = x.view(B, 64, 64, C) + x = x.permute(0, 3, 1, 2) + x = self.neck(x) + return x + + def forward(self, x): + x = self.forward_features(x) + # x = self.norm_head(x) + # x = 
self.head(x) + return x diff --git a/lama_cleaner/plugins/segment_anything/modeling/transformer.py b/iopaint/plugins/segment_anything/modeling/transformer.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/modeling/transformer.py rename to iopaint/plugins/segment_anything/modeling/transformer.py diff --git a/lama_cleaner/plugins/segment_anything/predictor.py b/iopaint/plugins/segment_anything/predictor.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/predictor.py rename to iopaint/plugins/segment_anything/predictor.py diff --git a/lama_cleaner/plugins/segment_anything/utils/__init__.py b/iopaint/plugins/segment_anything/utils/__init__.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/utils/__init__.py rename to iopaint/plugins/segment_anything/utils/__init__.py diff --git a/lama_cleaner/plugins/segment_anything/utils/transforms.py b/iopaint/plugins/segment_anything/utils/transforms.py similarity index 100% rename from lama_cleaner/plugins/segment_anything/utils/transforms.py rename to iopaint/plugins/segment_anything/utils/transforms.py diff --git a/lama_cleaner/runtime.py b/iopaint/runtime.py similarity index 54% rename from lama_cleaner/runtime.py rename to iopaint/runtime.py index d8cc2e0..950716f 100644 --- a/lama_cleaner/runtime.py +++ b/iopaint/runtime.py @@ -1,10 +1,16 @@ # https://github.com/huggingface/huggingface_hub/blob/5a12851f54bf614be39614034ed3a9031922d297/src/huggingface_hub/utils/_runtime.py +import os import platform import sys +from pathlib import Path + import packaging.version +from loguru import logger from rich import print from typing import Dict, Any +from iopaint.const import Device + _PY_VERSION: str = sys.version.split()[0].rstrip("+") if packaging.version.Version(_PY_VERSION) < packaging.version.Version("3.8.0"): @@ -21,9 +27,8 @@ _CANDIDATES = [ "diffusers", "transformers", "opencv-python", - "xformers", "accelerate", - "lama-cleaner", + "iopaint", "rembg", "realesrgan", "gfpgan", @@ -38,7 +43,7 @@ for name in _CANDIDATES: def dump_environment_info() -> Dict[str, str]: - """Dump information about the machine to help debugging issues. 
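
# Hedged sketch: instantiating the TinyViT encoder defined above (assumed to be
# in scope) with hyper-parameters commonly used for MobileSAM's TinyViT-5M
# variant; these values are an assumption, not taken from this diff.
import torch

encoder = TinyViT(
    img_size=1024,
    embed_dims=[64, 128, 160, 320],
    depths=[2, 2, 6, 2],
    num_heads=[2, 4, 5, 10],
    window_sizes=[7, 7, 14, 7],
)
with torch.no_grad():
    feats = encoder(torch.randn(1, 3, 1024, 1024))
print(feats.shape)  # (1, 256, 64, 64) after the SAM-style neck
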
""" + """Dump information about the machine to help debugging issues.""" # Generic machine info info: Dict[str, Any] = { @@ -48,3 +53,35 @@ def dump_environment_info() -> Dict[str, str]: info.update(_package_versions) print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n") return info + + +def check_device(device: Device) -> Device: + if device == Device.cuda: + import platform + + if platform.system() == "Darwin": + logger.warning("MacOS does not support cuda, use cpu instead") + return Device.cpu + else: + import torch + + if not torch.cuda.is_available(): + logger.warning("CUDA is not available, use cpu instead") + return Device.cpu + elif device == Device.mps: + import torch + + if not torch.backends.mps.is_available(): + logger.warning("mps is not available, use cpu instead") + return Device.cpu + return device + + +def setup_model_dir(model_dir: Path): + model_dir = model_dir.expanduser().absolute() + os.environ["U2NET_HOME"] = str(model_dir) + os.environ["XDG_CACHE_HOME"] = str(model_dir) + if not model_dir.exists(): + logger.info(f"Create model directory: {model_dir}") + model_dir.mkdir(exist_ok=True, parents=True) + return model_dir diff --git a/iopaint/schema.py b/iopaint/schema.py new file mode 100644 index 0000000..6fd3980 --- /dev/null +++ b/iopaint/schema.py @@ -0,0 +1,337 @@ +import json +import random +from enum import Enum +from pathlib import Path +from typing import Optional, Literal, List + +from loguru import logger +from pydantic import BaseModel, Field, field_validator + + +class Choices(str, Enum): + @classmethod + def values(cls): + return [member.value for member in cls] + + +class RealESRGANModel(Choices): + realesr_general_x4v3 = "realesr-general-x4v3" + RealESRGAN_x4plus = "RealESRGAN_x4plus" + RealESRGAN_x4plus_anime_6B = "RealESRGAN_x4plus_anime_6B" + + +class Device(Choices): + cpu = "cpu" + cuda = "cuda" + mps = "mps" + + +class InteractiveSegModel(Choices): + vit_b = "vit_b" + vit_l = "vit_l" + vit_h = "vit_h" + mobile_sam = "mobile_sam" + + +class PluginInfo(BaseModel): + name: str + support_gen_image: bool = False + support_gen_mask: bool = False + + +class CV2Flag(str, Enum): + INPAINT_NS = "INPAINT_NS" + INPAINT_TELEA = "INPAINT_TELEA" + + +class ModelType(str, Enum): + INPAINT = "inpaint" # LaMa, MAT... + DIFFUSERS_SD = "diffusers_sd" + DIFFUSERS_SD_INPAINT = "diffusers_sd_inpaint" + DIFFUSERS_SDXL = "diffusers_sdxl" + DIFFUSERS_SDXL_INPAINT = "diffusers_sdxl_inpaint" + DIFFUSERS_OTHER = "diffusers_other" + + +class HDStrategy(str, Enum): + # Use original image size + ORIGINAL = "Original" + # Resize the longer side of the image to a specific size(hd_strategy_resize_limit), + # then do inpainting on the resized image. Finally, resize the inpainting result to the original size. + # The area outside the mask will not lose quality. 
+ RESIZE = "Resize" + # Crop masking area(with a margin controlled by hd_strategy_crop_margin) from the original image to do inpainting + CROP = "Crop" + + +class LDMSampler(str, Enum): + ddim = "ddim" + plms = "plms" + + +class SDSampler(str, Enum): + dpm_plus_plus_2m = "DPM++ 2M" + dpm_plus_plus_2m_karras = "DPM++ 2M Karras" + dpm_plus_plus_2m_sde = "DPM++ 2M SDE" + dpm_plus_plus_2m_sde_karras = "DPM++ 2M SDE Karras" + dpm_plus_plus_sde = "DPM++ SDE" + dpm_plus_plus_sde_karras = "DPM++ SDE Karras" + dpm2 = "DPM2" + dpm2_karras = "DPM2 Karras" + dpm2_a = "DPM2 a" + dpm2_a_karras = "DPM2 a Karras" + euler = "Euler" + euler_a = "Euler a" + heun = "Heun" + lms = "LMS" + lms_karras = "LMS Karras" + + ddim = "DDIM" + pndm = "PNDM" + uni_pc = "UniPC" + lcm = "LCM" + + +class FREEUConfig(BaseModel): + s1: float = 0.9 + s2: float = 0.2 + b1: float = 1.2 + b2: float = 1.4 + + +class PowerPaintTask(str, Enum): + text_guided = "text-guided" + shape_guided = "shape-guided" + object_remove = "object-remove" + outpainting = "outpainting" + + +class ApiConfig(BaseModel): + host: str + port: int + model: str + no_half: bool + low_mem: bool + cpu_offload: bool + disable_nsfw_checker: bool + local_files_only: bool + cpu_textencoder: bool + device: Device + input: Optional[Path] + output_dir: Optional[Path] + quality: int + enable_interactive_seg: bool + interactive_seg_model: InteractiveSegModel + interactive_seg_device: Device + enable_remove_bg: bool + enable_anime_seg: bool + enable_realesrgan: bool + realesrgan_device: Device + realesrgan_model: RealESRGANModel + enable_gfpgan: bool + gfpgan_device: Device + enable_restoreformer: bool + restoreformer_device: Device + + +class InpaintRequest(BaseModel): + image: Optional[str] = Field(None, description="base64 encoded image") + mask: Optional[str] = Field(None, description="base64 encoded mask") + + ldm_steps: int = Field(20, description="Steps for ldm model.") + ldm_sampler: str = Field(LDMSampler.plms, discription="Sampler for ldm model.") + zits_wireframe: bool = Field(True, description="Enable wireframe for zits model.") + + hd_strategy: str = Field( + HDStrategy.CROP, + description="Different way to preprocess image, only used by erase models(e.g. lama/mat)", + ) + hd_strategy_crop_trigger_size: int = Field( + 800, + description="Crop trigger size for hd_strategy=CROP, if the longer side of the image is larger than this value, use crop strategy", + ) + hd_strategy_crop_margin: int = Field( + 128, description="Crop margin for hd_strategy=CROP" + ) + hd_strategy_resize_limit: int = Field( + 1280, description="Resize limit for hd_strategy=RESIZE" + ) + + prompt: str = Field("", description="Prompt for diffusion models.") + negative_prompt: str = Field( + "", description="Negative prompt for diffusion models." 
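
# A rough sketch of the Resize HD strategy described above: shrink the image so
# its longer side fits hd_strategy_resize_limit, inpaint at the lower
# resolution, then upscale the result and paste it back only inside the mask so
# the unmasked area keeps its original quality. Names here are illustrative,
# not the project's API.
import cv2
import numpy as np

def resize_strategy_sketch(image: np.ndarray, mask: np.ndarray, inpaint_fn, limit: int = 1280) -> np.ndarray:
    h, w = image.shape[:2]
    scale = limit / max(h, w)
    if scale >= 1.0:
        return inpaint_fn(image, mask)
    small = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
    small_mask = cv2.resize(mask, (small.shape[1], small.shape[0]), interpolation=cv2.INTER_NEAREST)
    upscaled = cv2.resize(inpaint_fn(small, small_mask), (w, h), interpolation=cv2.INTER_CUBIC)
    result = image.copy()
    result[mask > 0] = upscaled[mask > 0]
    return result
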
+ ) + use_croper: bool = Field( + False, description="Crop image before doing diffusion inpainting" + ) + croper_x: int = Field(0, description="Crop x for croper") + croper_y: int = Field(0, description="Crop y for croper") + croper_height: int = Field(512, description="Crop height for croper") + croper_width: int = Field(512, description="Crop width for croper") + + use_extender: bool = Field( + False, description="Extend image before doing sd outpainting" + ) + extender_x: int = Field(0, description="Extend x for extender") + extender_y: int = Field(0, description="Extend y for extender") + extender_height: int = Field(640, description="Extend height for extender") + extender_width: int = Field(640, description="Extend width for extender") + + sd_scale: float = Field( + 1.0, + description="Resize the image before doing sd inpainting, the area outside the mask will not lose quality.", + gt=0.0, + le=1.0, + ) + sd_mask_blur: int = Field( + 11, + description="Blur the edge of mask area. The higher the number the smoother blend with the original image", + ) + sd_strength: float = Field( + 1.0, + description="Strength is a measure of how much noise is added to the base image, which influences how similar the output is to the base image. Higher value means more noise and more different from the base image", + le=1.0, + ) + sd_steps: int = Field( + 50, + description="The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.", + ) + sd_guidance_scale: float = Field( + 7.5, + help="Higher guidance scale encourages to generate images that are closely linked to the text prompt, usually at the expense of lower image quality.", + ) + sd_sampler: str = Field( + SDSampler.uni_pc, description="Sampler for diffusion model." + ) + sd_seed: int = Field( + 42, + description="Seed for diffusion model. -1 mean random seed", + validate_default=True, + ) + sd_match_histograms: bool = Field( + False, + description="Match histograms between inpainting area and original image.", + ) + + sd_outpainting_softness: float = Field(20.0) + sd_outpainting_space: float = Field(20.0) + + sd_freeu: bool = Field( + False, + description="Enable freeu mode. https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu", + ) + sd_freeu_config: FREEUConfig = FREEUConfig() + + sd_lcm_lora: bool = Field( + False, + description="Enable lcm-lora mode. 
https://huggingface.co/docs/diffusers/main/en/using-diffusers/inference_with_lcm#texttoimage", + ) + + sd_keep_unmasked_area: bool = Field( + True, description="Keep unmasked area unchanged" + ) + + cv2_flag: CV2Flag = Field( + CV2Flag.INPAINT_NS, + description="Flag for opencv inpainting: https://docs.opencv.org/4.6.0/d7/d8b/group__photo__inpaint.html#gga8002a65f5a3328fbf15df81b842d3c3ca05e763003a805e6c11c673a9f4ba7d07", + ) + cv2_radius: int = Field( + 4, + description="Radius of a circular neighborhood of each point inpainted that is considered by the algorithm", + ) + + # Paint by Example + paint_by_example_example_image: Optional[str] = Field( + None, description="Base64 encoded example image for paint by example model" + ) + + # InstructPix2Pix + p2p_image_guidance_scale: float = Field(1.5, description="Image guidance scale") + + # ControlNet + enable_controlnet: bool = Field(False, description="Enable controlnet") + controlnet_conditioning_scale: float = Field( + 0.4, description="Conditioning scale", ge=0.0, le=1.0 + ) + controlnet_method: str = Field( + "lllyasviel/control_v11p_sd15_canny", description="Controlnet method" + ) + + # PowerPaint + powerpaint_task: PowerPaintTask = Field( + PowerPaintTask.text_guided, description="PowerPaint task" + ) + fitting_degree: float = Field( + 1.0, + description="Control the fitting degree of the generated objects to the mask shape.", + gt=0.0, + le=1.0, + ) + + @field_validator("sd_seed") + @classmethod + def sd_seed_validator(cls, v: int) -> int: + if v == -1: + return random.randint(1, 99999999) + return v + + @field_validator("controlnet_conditioning_scale") + @classmethod + def validate_field(cls, v: float, values): + use_extender = values.data["use_extender"] + enable_controlnet = values.data["enable_controlnet"] + if use_extender and enable_controlnet: + logger.info(f"Extender is enabled, set controlnet_conditioning_scale=0") + return 0 + return v + + +class RunPluginRequest(BaseModel): + name: str + image: str = Field(..., description="base64 encoded image") + clicks: List[List[int]] = Field( + [], description="Clicks for interactive seg, [[x,y,0/1], [x2,y2,0/1]]" + ) + scale: float = Field(2.0, description="Scale for upscaling") + + +MediaTab = Literal["input", "output"] + + +class MediasResponse(BaseModel): + name: str + height: int + width: int + ctime: float + mtime: float + + +class GenInfoResponse(BaseModel): + prompt: str = "" + negative_prompt: str = "" + + +class ServerConfigResponse(BaseModel): + plugins: List[PluginInfo] + enableFileManager: bool + enableAutoSaving: bool + enableControlnet: bool + controlnetMethod: Optional[str] + disableModelSwitch: bool + isDesktop: bool + samplers: List[str] + + +class SwitchModelRequest(BaseModel): + name: str + + +AdjustMaskOperate = Literal["expand", "shrink", "reverse"] + + +class AdjustMaskRequest(BaseModel): + mask: str = Field( + ..., description="base64 encoded mask. 
255 means area to do inpaint" + ) + operate: AdjustMaskOperate = Field(..., description="expand/shrink/reverse") + kernel_size: int = Field(5, description="Kernel size for expanding mask") diff --git a/iopaint/tests/.gitignore b/iopaint/tests/.gitignore new file mode 100644 index 0000000..89b7717 --- /dev/null +++ b/iopaint/tests/.gitignore @@ -0,0 +1,2 @@ +*_result.png +result/ \ No newline at end of file diff --git a/iopaint/tests/__init__.py b/iopaint/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lama_cleaner/tests/anime_test.png b/iopaint/tests/anime_test.png similarity index 100% rename from lama_cleaner/tests/anime_test.png rename to iopaint/tests/anime_test.png diff --git a/iopaint/tests/anytext_mask.jpg b/iopaint/tests/anytext_mask.jpg new file mode 100644 index 0000000..43d8b12 Binary files /dev/null and b/iopaint/tests/anytext_mask.jpg differ diff --git a/iopaint/tests/anytext_ref.jpg b/iopaint/tests/anytext_ref.jpg new file mode 100644 index 0000000..c36b3c5 Binary files /dev/null and b/iopaint/tests/anytext_ref.jpg differ diff --git a/lama_cleaner/tests/bunny.jpeg b/iopaint/tests/bunny.jpeg similarity index 100% rename from lama_cleaner/tests/bunny.jpeg rename to iopaint/tests/bunny.jpeg diff --git a/iopaint/tests/cat.png b/iopaint/tests/cat.png new file mode 100644 index 0000000..dee9eb6 Binary files /dev/null and b/iopaint/tests/cat.png differ diff --git a/iopaint/tests/icc_profile_test.jpg b/iopaint/tests/icc_profile_test.jpg new file mode 100644 index 0000000..b603ef9 Binary files /dev/null and b/iopaint/tests/icc_profile_test.jpg differ diff --git a/iopaint/tests/icc_profile_test.png b/iopaint/tests/icc_profile_test.png new file mode 100644 index 0000000..90d18ac Binary files /dev/null and b/iopaint/tests/icc_profile_test.png differ diff --git a/lama_cleaner/tests/image.png b/iopaint/tests/image.png similarity index 100% rename from lama_cleaner/tests/image.png rename to iopaint/tests/image.png diff --git a/lama_cleaner/tests/mask.png b/iopaint/tests/mask.png similarity index 100% rename from lama_cleaner/tests/mask.png rename to iopaint/tests/mask.png diff --git a/lama_cleaner/tests/overture-creations-5sI6fQgYIuo.png b/iopaint/tests/overture-creations-5sI6fQgYIuo.png similarity index 100% rename from lama_cleaner/tests/overture-creations-5sI6fQgYIuo.png rename to iopaint/tests/overture-creations-5sI6fQgYIuo.png diff --git a/iopaint/tests/overture-creations-5sI6fQgYIuo_all_mask.png b/iopaint/tests/overture-creations-5sI6fQgYIuo_all_mask.png new file mode 100644 index 0000000..e69de29 diff --git a/lama_cleaner/tests/overture-creations-5sI6fQgYIuo_mask.png b/iopaint/tests/overture-creations-5sI6fQgYIuo_mask.png similarity index 100% rename from lama_cleaner/tests/overture-creations-5sI6fQgYIuo_mask.png rename to iopaint/tests/overture-creations-5sI6fQgYIuo_mask.png diff --git a/lama_cleaner/tests/overture-creations-5sI6fQgYIuo_mask_blur.png b/iopaint/tests/overture-creations-5sI6fQgYIuo_mask_blur.png similarity index 100% rename from lama_cleaner/tests/overture-creations-5sI6fQgYIuo_mask_blur.png rename to iopaint/tests/overture-creations-5sI6fQgYIuo_mask_blur.png diff --git a/lama_cleaner/tests/pnginfo_test.png b/iopaint/tests/png_parameter_test.png similarity index 100% rename from lama_cleaner/tests/pnginfo_test.png rename to iopaint/tests/png_parameter_test.png diff --git a/iopaint/tests/test_adjust_mask.py b/iopaint/tests/test_adjust_mask.py new file mode 100644 index 0000000..1f01713 --- /dev/null +++ b/iopaint/tests/test_adjust_mask.py @@ 
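
# Hedged sketch of constructing the request models defined in iopaint/schema.py
# above; the file paths and field values are illustrative only.
import base64
from pathlib import Path

from iopaint.schema import AdjustMaskRequest, InpaintRequest

def b64(path: str) -> str:
    return base64.b64encode(Path(path).read_bytes()).decode("utf-8")

inpaint_req = InpaintRequest(
    image=b64("image.png"),
    mask=b64("mask.png"),
    prompt="a fox sitting on a bench",
    sd_steps=30,
    sd_seed=-1,  # the sd_seed validator swaps -1 for a random seed
)
adjust_req = AdjustMaskRequest(mask=b64("mask.png"), operate="expand", kernel_size=9)
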
-0,0 +1,17 @@ +import cv2 +from iopaint.helper import adjust_mask +from iopaint.tests.utils import current_dir, save_dir + +mask_p = current_dir / "overture-creations-5sI6fQgYIuo_mask.png" + + +def test_adjust_mask(): + mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE) + res_mask = adjust_mask(mask, 0, "expand") + cv2.imwrite(str(save_dir / "adjust_mask_original.png"), res_mask) + res_mask = adjust_mask(mask, 40, "expand") + cv2.imwrite(str(save_dir / "adjust_mask_expand.png"), res_mask) + res_mask = adjust_mask(mask, 20, "shrink") + cv2.imwrite(str(save_dir / "adjust_mask_shrink.png"), res_mask) + res_mask = adjust_mask(mask, 20, "reverse") + cv2.imwrite(str(save_dir / "adjust_mask_reverse.png"), res_mask) diff --git a/iopaint/tests/test_anytext.py b/iopaint/tests/test_anytext.py new file mode 100644 index 0000000..996176f --- /dev/null +++ b/iopaint/tests/test_anytext.py @@ -0,0 +1,45 @@ +import os + +from iopaint.tests.utils import check_device, get_config, assert_equal + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +from pathlib import Path + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy + +current_dir = Path(__file__).parent.absolute().resolve() +save_dir = current_dir / "result" +save_dir.mkdir(exist_ok=True, parents=True) + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +def test_anytext(device): + sd_steps = check_device(device) + model = ModelManager( + name="Sanster/AnyText", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt='Characters written in chalk on the blackboard that says "DADDY", best quality, extremely detailed,4k, HD, supper legible text, clear text edges, clear strokes, neat writing, no watermarks', + negative_prompt="low-res, bad anatomy, extra digit, fewer digits, cropped, worst quality, low quality, watermark, unreadable text, messy words, distorted text, disorganized writing, advertising picture", + sd_steps=sd_steps, + sd_guidance_scale=9.0, + sd_seed=66273235, + sd_match_histograms=True + ) + + assert_equal( + model, + cfg, + f"anytext.png", + img_p=current_dir / "anytext_ref.jpg", + mask_p=current_dir / "anytext_mask.jpg", + ) diff --git a/iopaint/tests/test_controlnet.py b/iopaint/tests/test_controlnet.py new file mode 100644 index 0000000..c271345 --- /dev/null +++ b/iopaint/tests/test_controlnet.py @@ -0,0 +1,118 @@ +import os + +from iopaint.const import SD_CONTROLNET_CHOICES +from iopaint.tests.utils import current_dir, check_device, get_config, assert_equal + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +from pathlib import Path + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy, SDSampler + + +model_name = "runwayml/stable-diffusion-inpainting" + + +def convert_controlnet_method_name(name): + return name.replace("/", "--") + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("controlnet_method", [SD_CONTROLNET_CHOICES[0]]) +def test_runway_sd_1_5(device, controlnet_method): + sd_steps = check_device(device) + + model = ModelManager( + name=model_name, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=device == "cuda", + enable_controlnet=True, + controlnet_method=controlnet_method, + ) + + cfg = get_config( + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + enable_controlnet=True, + controlnet_conditioning_scale=0.5, + 
controlnet_method=controlnet_method, + ) + name = f"device_{device}" + + assert_equal( + model, + cfg, + f"sd_controlnet_{convert_controlnet_method_name(controlnet_method)}_{name}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +def test_controlnet_switch(device): + sd_steps = check_device(device) + model = ModelManager( + name=model_name, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + cpu_offload=True, + enable_controlnet=True, + controlnet_method="lllyasviel/control_v11p_sd15_canny", + ) + cfg = get_config( + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + enable_controlnet=True, + controlnet_method="lllyasviel/control_v11f1p_sd15_depth", + ) + + assert_equal( + model, + cfg, + f"controlnet_switch_canny_to_depth_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=1.2 + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize( + "local_file", ["sd-v1-5-inpainting.ckpt", "v1-5-pruned-emaonly.safetensors"] +) +def test_local_file_path(device, local_file): + sd_steps = check_device(device) + + controlnet_kwargs = dict( + enable_controlnet=True, + controlnet_method=SD_CONTROLNET_CHOICES[0], + ) + + model = ModelManager( + name=local_file, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + cpu_offload=True, + **controlnet_kwargs, + ) + cfg = get_config( + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + **controlnet_kwargs, + ) + + name = f"device_{device}" + + assert_equal( + model, + cfg, + f"{convert_controlnet_method_name(controlnet_kwargs['controlnet_method'])}_local_model_{name}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) diff --git a/iopaint/tests/test_instruct_pix2pix.py b/iopaint/tests/test_instruct_pix2pix.py new file mode 100644 index 0000000..3b81974 --- /dev/null +++ b/iopaint/tests/test_instruct_pix2pix.py @@ -0,0 +1,40 @@ +from pathlib import Path + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy +from iopaint.tests.utils import get_config, check_device, assert_equal, current_dir + +model_name = "timbrooks/instruct-pix2pix" + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("disable_nsfw", [True, False]) +@pytest.mark.parametrize("cpu_offload", [False, True]) +def test_instruct_pix2pix(device, disable_nsfw, cpu_offload): + sd_steps = check_device(device) + model = ModelManager( + name=model_name, + device=torch.device(device), + disable_nsfw=disable_nsfw, + sd_cpu_textencoder=False, + cpu_offload=cpu_offload, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="What if it were snowing?", + p2p_steps=sd_steps, + ) + + name = f"device_{device}_disnsfw_{disable_nsfw}_cpu_offload_{cpu_offload}" + + assert_equal( + model, + cfg, + f"instruct_pix2pix_{name}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=1.3, + ) diff --git a/lama_cleaner/tests/test_load_img.py b/iopaint/tests/test_load_img.py similarity index 79% rename from lama_cleaner/tests/test_load_img.py rename to iopaint/tests/test_load_img.py index 
6028a60..f7071bf 100644 --- a/lama_cleaner/tests/test_load_img.py +++ b/iopaint/tests/test_load_img.py @@ -1,8 +1,6 @@ -from pathlib import Path +from iopaint.helper import load_img +from iopaint.tests.utils import current_dir -from lama_cleaner.helper import load_img - -current_dir = Path(__file__).parent.absolute().resolve() png_img_p = current_dir / "image.png" jpg_img_p = current_dir / "bunny.jpeg" diff --git a/iopaint/tests/test_low_mem.py b/iopaint/tests/test_low_mem.py new file mode 100644 index 0000000..70e8801 --- /dev/null +++ b/iopaint/tests/test_low_mem.py @@ -0,0 +1,131 @@ +import os + +from loguru import logger + +from iopaint.tests.utils import check_device, get_config, assert_equal, current_dir + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy, SDSampler, FREEUConfig + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +def test_runway_sd_1_5_low_mem(device): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + low_mem=True, + ) + + all_samplers = [member.value for member in SDSampler.__members__.values()] + print(all_samplers) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + sd_sampler=SDSampler.ddim, + ) + + name = f"device_{device}" + + assert_equal( + model, + cfg, + f"runway_sd_{name}_low_mem.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("sampler", [SDSampler.lcm]) +def test_runway_sd_lcm_lora_low_mem(device, sampler): + check_device(device) + + sd_steps = 5 + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + low_mem=True, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_guidance_scale=2, + sd_lcm_lora=True, + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_sd_1_5_lcm_lora_device_{device}_low_mem.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_runway_sd_freeu(device, sampler): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + low_mem=True, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_guidance_scale=7.5, + sd_freeu=True, + sd_freeu_config=FREEUConfig(), + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_sd_1_5_freeu_device_{device}_low_mem.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_runway_norm_sd_model(device, strategy, sampler): + sd_steps = 
check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-v1-5", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + low_mem=True, + ) + cfg = get_config( + strategy=strategy, prompt="face of a fox, sitting on a bench", sd_steps=sd_steps + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_{device}_norm_sd_model_device_{device}_low_mem.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) diff --git a/iopaint/tests/test_match_histograms.py b/iopaint/tests/test_match_histograms.py new file mode 100644 index 0000000..c20a283 --- /dev/null +++ b/iopaint/tests/test_match_histograms.py @@ -0,0 +1,36 @@ +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import SDSampler, HDStrategy +from iopaint.tests.utils import check_device, get_config, assert_equal, current_dir + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_sd_match_histograms(device, sampler): + sd_steps = check_device(device) + + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_guidance_scale=7.5, + sd_lcm_lora=False, + sd_match_histograms=True, + sd_sampler=sampler + ) + + assert_equal( + model, + cfg, + f"runway_sd_1_5_device_{device}_match_histograms.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) diff --git a/iopaint/tests/test_model.py b/iopaint/tests/test_model.py new file mode 100644 index 0000000..dd84b12 --- /dev/null +++ b/iopaint/tests/test_model.py @@ -0,0 +1,160 @@ +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy, LDMSampler +from iopaint.tests.utils import assert_equal, get_config, current_dir, check_device + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize( + "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP] +) +def test_lama(device, strategy): + check_device(device) + model = ModelManager(name="lama", device=device) + assert_equal( + model, + get_config(strategy=strategy), + f"lama_{strategy[0].upper() + strategy[1:]}_result.png", + ) + + fx = 1.3 + assert_equal( + model, + get_config(strategy=strategy), + f"lama_{strategy[0].upper() + strategy[1:]}_fx_{fx}_result.png", + fx=1.3, + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize( + "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP] +) +@pytest.mark.parametrize("ldm_sampler", [LDMSampler.ddim, LDMSampler.plms]) +def test_ldm(device, strategy, ldm_sampler): + check_device(device) + model = ModelManager(name="ldm", device=device) + cfg = get_config(strategy=strategy, ldm_sampler=ldm_sampler) + assert_equal( + model, cfg, f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_result.png" + ) + + fx = 1.3 + assert_equal( + model, + cfg, + f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_fx_{fx}_result.png", + fx=fx, + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize( + "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP] +) 
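
# The tests in this file call check_device, imported from iopaint.tests.utils,
# which is not shown in this hunk. A hedged sketch of what such a helper might
# do (skip unavailable backends, return a small step count for fast CI runs);
# the name and return values below are assumptions.
import pytest
import torch

def check_device_sketch(device: str) -> int:
    if device == "cuda" and not torch.cuda.is_available():
        pytest.skip("cuda is not available")
    if device == "mps" and not torch.backends.mps.is_available():
        pytest.skip("mps is not available")
    return 30 if device == "cuda" else 20
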
+@pytest.mark.parametrize("zits_wireframe", [False, True]) +def test_zits(device, strategy, zits_wireframe): + check_device(device) + model = ModelManager(name="zits", device=device) + cfg = get_config(strategy=strategy, zits_wireframe=zits_wireframe) + assert_equal( + model, + cfg, + f"zits_{strategy[0].upper() + strategy[1:]}_wireframe_{zits_wireframe}_result.png", + ) + + fx = 1.3 + assert_equal( + model, + cfg, + f"zits_{strategy.capitalize()}_wireframe_{zits_wireframe}_fx_{fx}_result.png", + fx=fx, + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("no_half", [True, False]) +def test_mat(device, strategy, no_half): + check_device(device) + model = ModelManager(name="mat", device=device, no_half=no_half) + cfg = get_config(strategy=strategy) + + assert_equal( + model, + cfg, + f"mat_{strategy.capitalize()}_result.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +def test_fcf(device, strategy): + check_device(device) + model = ModelManager(name="fcf", device=device) + cfg = get_config(strategy=strategy) + + assert_equal(model, cfg, f"fcf_{strategy.capitalize()}_result.png", fx=2, fy=2) + assert_equal(model, cfg, f"fcf_{strategy.capitalize()}_result.png", fx=3.8, fy=2) + + +@pytest.mark.parametrize( + "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP] +) +@pytest.mark.parametrize("cv2_flag", ["INPAINT_NS", "INPAINT_TELEA"]) +@pytest.mark.parametrize("cv2_radius", [3, 15]) +def test_cv2(strategy, cv2_flag, cv2_radius): + model = ModelManager( + name="cv2", + device=torch.device("cpu"), + ) + cfg = get_config(strategy=strategy, cv2_flag=cv2_flag, cv2_radius=cv2_radius) + assert_equal( + model, + cfg, + f"cv2_{strategy.capitalize()}_{cv2_flag}_{cv2_radius}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize( + "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP] +) +def test_manga(device, strategy): + check_device(device) + model = ModelManager( + name="manga", + device=torch.device(device), + ) + cfg = get_config(strategy=strategy) + assert_equal( + model, + cfg, + f"manga_{strategy.capitalize()}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +def test_mi_gan(device, strategy): + check_device(device) + model = ModelManager( + name="migan", + device=torch.device(device), + ) + cfg = get_config(strategy=strategy) + assert_equal( + model, + cfg, + f"migan_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=1.5, + fy=1.7 + ) diff --git a/iopaint/tests/test_model_md5.py b/iopaint/tests/test_model_md5.py new file mode 100644 index 0000000..3a81d72 --- /dev/null +++ b/iopaint/tests/test_model_md5.py @@ -0,0 +1,16 @@ +def test_load_model(): + from iopaint.plugins import InteractiveSeg + from iopaint.model_manager import ModelManager + + interactive_seg_model = InteractiveSeg("vit_l", "cpu") + + models = ["lama", "ldm", "zits", "mat", "fcf", "manga", "migan"] + for m in models: + ModelManager( + name=m, + 
device="cpu", + no_half=False, + disable_nsfw=False, + sd_cpu_textencoder=True, + cpu_offload=True, + ) diff --git a/iopaint/tests/test_model_switch.py b/iopaint/tests/test_model_switch.py new file mode 100644 index 0000000..735e1bd --- /dev/null +++ b/iopaint/tests/test_model_switch.py @@ -0,0 +1,70 @@ +import os + +from iopaint.schema import InpaintRequest + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + +import torch + +from iopaint.model_manager import ModelManager + + +def test_model_switch(): + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + enable_controlnet=True, + controlnet_method="lllyasviel/control_v11p_sd15_canny", + device=torch.device("mps"), + disable_nsfw=True, + sd_cpu_textencoder=True, + cpu_offload=False, + ) + + model.switch("lama") + + +def test_controlnet_switch_onoff(caplog): + name = "runwayml/stable-diffusion-inpainting" + model = ModelManager( + name=name, + enable_controlnet=True, + controlnet_method="lllyasviel/control_v11p_sd15_canny", + device=torch.device("mps"), + disable_nsfw=True, + sd_cpu_textencoder=True, + cpu_offload=False, + ) + + model.switch_controlnet_method( + InpaintRequest( + name=name, + enable_controlnet=False, + ) + ) + + assert "Disable controlnet" in caplog.text + + +def test_switch_controlnet_method(caplog): + name = "runwayml/stable-diffusion-inpainting" + old_method = "lllyasviel/control_v11p_sd15_canny" + new_method = "lllyasviel/control_v11p_sd15_openpose" + model = ModelManager( + name=name, + enable_controlnet=True, + controlnet_method=old_method, + device=torch.device("mps"), + disable_nsfw=True, + sd_cpu_textencoder=True, + cpu_offload=False, + ) + + model.switch_controlnet_method( + InpaintRequest( + name=name, + enable_controlnet=True, + controlnet_method=new_method, + ) + ) + + assert f"Switch Controlnet method from {old_method} to {new_method}" in caplog.text diff --git a/iopaint/tests/test_outpainting.py b/iopaint/tests/test_outpainting.py new file mode 100644 index 0000000..024d701 --- /dev/null +++ b/iopaint/tests/test_outpainting.py @@ -0,0 +1,138 @@ +import os + +from iopaint.tests.utils import current_dir, check_device + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +from pathlib import Path + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy, SDSampler +from iopaint.tests.test_model import get_config, assert_equal + + +@pytest.mark.parametrize("name", ["runwayml/stable-diffusion-inpainting"]) +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize( + "rect", + [ + [0, -100, 512, 512 - 128 + 100], + [0, 128, 512, 512 - 128 + 100], + [128, 0, 512 - 128 + 100, 512], + [-100, 0, 512 - 128 + 100, 512], + [0, 0, 512, 512 + 200], + [0, 0, 512 + 200, 512], + [-100, -100, 512 + 200, 512 + 200], + ], +) +def test_outpainting(name, device, rect): + sd_steps = check_device(device) + + model = ModelManager( + name=name, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + prompt="a dog sitting on a bench in the park", + sd_steps=sd_steps, + use_extender=True, + extender_x=rect[0], + extender_y=rect[1], + extender_width=rect[2], + extender_height=rect[3], + sd_guidance_scale=8.0, + sd_sampler=SDSampler.dpm_plus_plus_2m, + ) + + assert_equal( + model, + cfg, + f"{name.replace('/', '--')}_outpainting_{'_'.join(map(str, rect))}_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / 
"overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("name", ["kandinsky-community/kandinsky-2-2-decoder-inpaint"]) +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize( + "rect", + [ + [-128, -128, 768, 768], + ], +) +def test_kandinsky_outpainting(name, device, rect): + sd_steps = check_device(device) + + model = ModelManager( + name=name, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + prompt="a cat", + negative_prompt="lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature", + sd_steps=sd_steps, + use_extender=True, + extender_x=rect[0], + extender_y=rect[1], + extender_width=rect[2], + extender_height=rect[3], + sd_guidance_scale=7, + sd_sampler=SDSampler.dpm_plus_plus_2m, + ) + + assert_equal( + model, + cfg, + f"{name.replace('/', '--')}_outpainting_{'_'.join(map(str, rect))}_device_{device}.png", + img_p=current_dir / "cat.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=1, + fy=1, + ) + + +@pytest.mark.parametrize("name", ["Sanster/PowerPaint-V1-stable-diffusion-inpainting"]) +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize( + "rect", + [ + [-100, -100, 512 + 200, 512 + 200], + ], +) +def test_powerpaint_outpainting(name, device, rect): + sd_steps = check_device(device) + + model = ModelManager( + name=name, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + low_mem=True + ) + cfg = get_config( + prompt="a dog sitting on a bench in the park", + sd_steps=sd_steps, + use_extender=True, + extender_x=rect[0], + extender_y=rect[1], + extender_width=rect[2], + extender_height=rect[3], + sd_guidance_scale=8.0, + sd_sampler=SDSampler.dpm_plus_plus_2m, + powerpaint_task="outpainting", + ) + + assert_equal( + model, + cfg, + f"{name.replace('/', '--')}_outpainting_{'_'.join(map(str, rect))}_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) diff --git a/iopaint/tests/test_paint_by_example.py b/iopaint/tests/test_paint_by_example.py new file mode 100644 index 0000000..27b8a77 --- /dev/null +++ b/iopaint/tests/test_paint_by_example.py @@ -0,0 +1,55 @@ +import cv2 +import pytest +from PIL import Image + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy +from iopaint.tests.utils import ( + current_dir, + get_config, + get_data, + save_dir, + check_device, +) + +model_name = "Fantasy-Studio/Paint-by-Example" + + +def assert_equal( + model, + config, + save_name: str, + fx: float = 1, + fy: float = 1, + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + example_p=current_dir / "bunny.jpeg", +): + img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p) + + example_image = cv2.imread(str(example_p)) + example_image = cv2.cvtColor(example_image, cv2.COLOR_BGRA2RGB) + example_image = cv2.resize( + example_image, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA + ) + + 
print(f"Input image shape: {img.shape}, example_image: {example_image.shape}") + config.paint_by_example_example_image = Image.fromarray(example_image) + res = model(img, mask, config) + cv2.imwrite(str(save_dir / save_name), res) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +def test_paint_by_example(device): + sd_steps = check_device(device) + model = ModelManager(name=model_name, device=device, disable_nsfw=True) + cfg = get_config(strategy=HDStrategy.ORIGINAL, sd_steps=sd_steps) + assert_equal( + model, + cfg, + f"paint_by_example_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fy=0.9, + fx=1.3, + ) diff --git a/iopaint/tests/test_plugins.py b/iopaint/tests/test_plugins.py new file mode 100644 index 0000000..c481cb1 --- /dev/null +++ b/iopaint/tests/test_plugins.py @@ -0,0 +1,120 @@ +import hashlib +import os +import time +from PIL import Image + +from iopaint.helper import encode_pil_to_base64, gen_frontend_mask +from iopaint.plugins.anime_seg import AnimeSeg +from iopaint.schema import RunPluginRequest +from iopaint.tests.utils import check_device, current_dir, save_dir + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + +import cv2 +import pytest + +from iopaint.plugins import ( + RemoveBG, + RealESRGANUpscaler, + GFPGANPlugin, + RestoreFormerPlugin, + InteractiveSeg, +) + +img_p = current_dir / "bunny.jpeg" +img_bytes = open(img_p, "rb").read() +bgr_img = cv2.imread(str(img_p)) +rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB) +rgb_img_base64 = encode_pil_to_base64(Image.fromarray(rgb_img), 100, {}) +bgr_img_base64 = encode_pil_to_base64(Image.fromarray(bgr_img), 100, {}) + + +def _save(img, name): + cv2.imwrite(str(save_dir / name), img) + + +def test_remove_bg(): + model = RemoveBG() + rgba_np_img = model.gen_image( + rgb_img, RunPluginRequest(name=RemoveBG.name, image=rgb_img_base64) + ) + res = cv2.cvtColor(rgba_np_img, cv2.COLOR_RGBA2BGRA) + _save(res, "test_remove_bg.png") + + bgr_np_img = model.gen_mask( + rgb_img, RunPluginRequest(name=RemoveBG.name, image=rgb_img_base64) + ) + + res_mask = gen_frontend_mask(bgr_np_img) + _save(res_mask, "test_remove_bg_frontend_mask.png") + + assert len(bgr_np_img.shape) == 2 + _save(bgr_np_img, "test_remove_bg_mask.jpeg") + + +def test_anime_seg(): + model = AnimeSeg() + img = cv2.imread(str(current_dir / "anime_test.png")) + img_base64 = encode_pil_to_base64(Image.fromarray(img), 100, {}) + res = model.gen_image(img, RunPluginRequest(name=AnimeSeg.name, image=img_base64)) + assert len(res.shape) == 3 + assert res.shape[-1] == 4 + _save(res, "test_anime_seg.png") + + res = model.gen_mask(img, RunPluginRequest(name=AnimeSeg.name, image=img_base64)) + assert len(res.shape) == 2 + _save(res, "test_anime_seg_mask.png") + + +@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"]) +def test_upscale(device): + check_device(device) + model = RealESRGANUpscaler("realesr-general-x4v3", device) + res = model.gen_image( + rgb_img, + RunPluginRequest(name=RealESRGANUpscaler.name, image=rgb_img_base64, scale=2), + ) + _save(res, f"test_upscale_x2_{device}.png") + + res = model.gen_image( + rgb_img, + RunPluginRequest(name=RealESRGANUpscaler.name, image=rgb_img_base64, scale=4), + ) + _save(res, f"test_upscale_x4_{device}.png") + + +@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"]) +def test_gfpgan(device): + check_device(device) + model = GFPGANPlugin(device) + res = model.gen_image( + rgb_img, 
RunPluginRequest(name=GFPGANPlugin.name, image=rgb_img_base64) + ) + _save(res, f"test_gfpgan_{device}.png") + + +@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"]) +def test_restoreformer(device): + check_device(device) + model = RestoreFormerPlugin(device) + res = model.gen_image( + rgb_img, RunPluginRequest(name=RestoreFormerPlugin.name, image=rgb_img_base64) + ) + _save(res, f"test_restoreformer_{device}.png") + + +@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"]) +def test_segment_anything(device): + check_device(device) + model = InteractiveSeg("vit_l", device) + new_mask = model.gen_mask( + rgb_img, + RunPluginRequest( + name=InteractiveSeg.name, + image=rgb_img_base64, + clicks=([[448 // 2, 394 // 2, 1]]), + ), + ) + + save_name = f"test_segment_anything_{device}.png" + _save(new_mask, save_name) diff --git a/iopaint/tests/test_save_exif.py b/iopaint/tests/test_save_exif.py new file mode 100644 index 0000000..5c19810 --- /dev/null +++ b/iopaint/tests/test_save_exif.py @@ -0,0 +1,59 @@ +import io +import tempfile +from pathlib import Path +from typing import List + +from PIL import Image + +from iopaint.helper import pil_to_bytes, load_img + +current_dir = Path(__file__).parent.absolute().resolve() + + +def print_exif(exif): + for k, v in exif.items(): + print(f"{k}: {v}") + + +def extra_info(img_p: Path): + ext = img_p.suffix.strip(".") + img_bytes = img_p.read_bytes() + np_img, _, infos = load_img(img_bytes, False, True) + res_pil_bytes = pil_to_bytes(Image.fromarray(np_img), ext=ext, infos=infos) + res_img = Image.open(io.BytesIO(res_pil_bytes)) + return infos, res_img.info, res_pil_bytes + + +def assert_keys(keys: List[str], infos, res_infos): + for k in keys: + assert k in infos + assert k in res_infos + assert infos[k] == res_infos[k] + + +def run_test(file_path, keys): + infos, res_infos, res_pil_bytes = extra_info(file_path) + assert_keys(keys, infos, res_infos) + with tempfile.NamedTemporaryFile("wb", suffix=file_path.suffix) as temp_file: + temp_file.write(res_pil_bytes) + temp_file.flush() + infos, res_infos, res_pil_bytes = extra_info(Path(temp_file.name)) + assert_keys(keys, infos, res_infos) + + +def test_png_icc_profile_png(): + run_test(current_dir / "icc_profile_test.png", ["icc_profile", "exif"]) + + +def test_png_icc_profile_jpeg(): + run_test(current_dir / "icc_profile_test.jpg", ["icc_profile", "exif"]) + + +def test_jpeg(): + jpg_img_p = current_dir / "bunny.jpeg" + run_test(jpg_img_p, ["dpi", "exif"]) + + +def test_png_parameter(): + jpg_img_p = current_dir / "png_parameter_test.png" + run_test(jpg_img_p, ["parameters"]) diff --git a/iopaint/tests/test_sd_model.py b/iopaint/tests/test_sd_model.py new file mode 100644 index 0000000..6865e5a --- /dev/null +++ b/iopaint/tests/test_sd_model.py @@ -0,0 +1,265 @@ +import os + +from loguru import logger + +from iopaint.tests.utils import check_device, get_config, assert_equal + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +from pathlib import Path + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import HDStrategy, SDSampler, FREEUConfig + +current_dir = Path(__file__).parent.absolute().resolve() +save_dir = current_dir / "result" +save_dir.mkdir(exist_ok=True, parents=True) + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +def test_runway_sd_1_5_all_samplers(device): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + 
sd_cpu_textencoder=False, + ) + + all_samplers = [member.value for member in SDSampler.__members__.values()] + print(all_samplers) + for sampler in all_samplers: + print(f"Testing sampler {sampler}") + if ( + sampler + in [SDSampler.dpm2_karras, SDSampler.dpm2_a_karras, SDSampler.lms_karras] + and device == "mps" + ): + # diffusers 0.25.0 still has bug on these sampler on mps, wait main branch released to fix it + logger.warning( + "skip dpm2_karras on mps, diffusers does not support it on mps. TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ) + continue + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + sd_sampler=sampler, + ) + + name = f"device_{device}_{sampler}" + + assert_equal( + model, + cfg, + f"runway_sd_{name}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("sampler", [SDSampler.lcm]) +def test_runway_sd_lcm_lora(device, sampler): + check_device(device) + + sd_steps = 5 + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_guidance_scale=2, + sd_lcm_lora=True, + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_sd_1_5_lcm_lora_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_runway_sd_freeu(device, sampler): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_guidance_scale=7.5, + sd_freeu=True, + sd_freeu_config=FREEUConfig(), + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_sd_1_5_freeu_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_runway_sd_sd_strength(device, strategy, sampler): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=strategy, + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + sd_strength=0.8, + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_sd_strength_0.8_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def 
test_runway_sd_cpu_textencoder(device, strategy, sampler): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=True, + ) + cfg = get_config( + strategy=strategy, + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + sd_sampler=sampler, + ) + + assert_equal( + model, + cfg, + f"runway_sd_device_{device}_cpu_textencoder.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_runway_norm_sd_model(device, strategy, sampler): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-v1-5", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=strategy, prompt="face of a fox, sitting on a bench", sd_steps=sd_steps + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"runway_{device}_norm_sd_model_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.dpm_plus_plus_2m]) +def test_runway_sd_1_5_cpu_offload(device, strategy, sampler): + sd_steps = check_device(device) + model = ModelManager( + name="runwayml/stable-diffusion-inpainting", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + cpu_offload=True, + ) + cfg = get_config( + strategy=strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps + ) + cfg.sd_sampler = sampler + + name = f"device_{device}_{sampler}" + + assert_equal( + model, + cfg, + f"runway_sd_{strategy.capitalize()}_{name}_cpu_offload.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps", "cpu"]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +@pytest.mark.parametrize( + "name", + [ + "sd-v1-5-inpainting.safetensors", + "v1-5-pruned-emaonly.safetensors", + "sd_xl_base_1.0.safetensors", + "sd_xl_base_1.0_inpainting_0.1.safetensors", + ], +) +def test_local_file_path(device, sampler, name): + sd_steps = check_device(device) + model = ModelManager( + name=name, + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + cpu_offload=False, + ) + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="a fox sitting on a bench", + sd_steps=sd_steps, + ) + cfg.sd_sampler = sampler + + name = f"device_{device}_{sampler}_{name}" + + assert_equal( + model, + cfg, + f"sd_local_model_{name}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + ) diff --git a/iopaint/tests/test_sdxl.py b/iopaint/tests/test_sdxl.py new file mode 100644 index 0000000..e236948 --- /dev/null +++ b/iopaint/tests/test_sdxl.py @@ -0,0 +1,172 @@ +import os + +from iopaint.tests.utils import check_device, current_dir + +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + +import pytest +import torch + +from iopaint.model_manager import ModelManager +from iopaint.schema import 
HDStrategy, SDSampler, FREEUConfig +from iopaint.tests.test_model import get_config, assert_equal + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_sdxl(device, strategy, sampler): + sd_steps = check_device(device) + + model = ModelManager( + name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=strategy, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_strength=1.0, + sd_guidance_scale=7.0, + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"sdxl_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=2, + fy=2, + ) + + +@pytest.mark.parametrize("device", ["cuda", "cpu"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_sdxl_cpu_text_encoder(device, strategy, sampler): + sd_steps = check_device(device) + + model = ModelManager( + name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=True, + ) + cfg = get_config( + strategy=strategy, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_strength=1.0, + sd_guidance_scale=7.0, + ) + cfg.sd_sampler = sampler + + assert_equal( + model, + cfg, + f"sdxl_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=2, + fy=2, + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL]) +@pytest.mark.parametrize("sampler", [SDSampler.ddim]) +def test_sdxl_lcm_lora_and_freeu(device, strategy, sampler): + sd_steps = check_device(device) + + model = ModelManager( + name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + cfg = get_config( + strategy=strategy, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_strength=1.0, + sd_guidance_scale=2.0, + sd_lcm_lora=True, + ) + cfg.sd_sampler = sampler + + name = f"device_{device}_{sampler}" + + assert_equal( + model, + cfg, + f"sdxl_{name}_lcm_lora.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=2, + fy=2, + ) + + cfg = get_config( + strategy=strategy, + prompt="face of a fox, sitting on a bench", + sd_steps=sd_steps, + sd_guidance_scale=7.5, + sd_freeu=True, + sd_freeu_config=FREEUConfig(), + ) + + assert_equal( + model, + cfg, + f"sdxl_{name}_freeu_device_{device}.png", + img_p=current_dir / "overture-creations-5sI6fQgYIuo.png", + mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png", + fx=2, + fy=2, + ) + + +@pytest.mark.parametrize("device", ["cuda", "mps"]) +@pytest.mark.parametrize( + "rect", + [ + [-128, -128, 1024, 1024], + ], +) +def test_sdxl_outpainting(device, rect): + sd_steps = check_device(device) + + model = ModelManager( + name="diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + device=torch.device(device), + disable_nsfw=True, + sd_cpu_textencoder=False, + ) + + cfg = get_config( + strategy=HDStrategy.ORIGINAL, + prompt="a dog sitting on a bench in the park", + sd_steps=sd_steps, + 
use_extender=True,
+        extender_x=rect[0],
+        extender_y=rect[1],
+        extender_width=rect[2],
+        extender_height=rect[3],
+        sd_strength=1.0,
+        sd_guidance_scale=8.0,
+        sd_sampler=SDSampler.ddim,
+    )
+
+    assert_equal(
+        model,
+        cfg,
+        f"sdxl_outpainting_dog_ddim_{'_'.join(map(str, rect))}_device_{device}.png",
+        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
+        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
+        fx=1.5,
+        fy=1.5,
+    )
diff --git a/iopaint/tests/utils.py b/iopaint/tests/utils.py
new file mode 100644
index 0000000..08f4aeb
--- /dev/null
+++ b/iopaint/tests/utils.py
@@ -0,0 +1,77 @@
+from pathlib import Path
+import cv2
+import pytest
+import torch
+
+from iopaint.helper import encode_pil_to_base64
+from iopaint.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
+from PIL import Image
+
+current_dir = Path(__file__).parent.absolute().resolve()
+save_dir = current_dir / "result"
+save_dir.mkdir(exist_ok=True, parents=True)
+
+
+def check_device(device: str) -> int:
+    if device == "cuda" and not torch.cuda.is_available():
+        pytest.skip("CUDA is not available, skip test on cuda")
+    if device == "mps" and not torch.backends.mps.is_available():
+        pytest.skip("mps is not available, skip test on mps")
+    steps = 2 if device == "cpu" else 20
+    return steps
+
+
+def assert_equal(
+    model,
+    config: InpaintRequest,
+    gt_name,
+    fx: float = 1,
+    fy: float = 1,
+    img_p=current_dir / "image.png",
+    mask_p=current_dir / "mask.png",
+):
+    img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
+    print(f"Input image shape: {img.shape}")
+    res = model(img, mask, config)
+    ok = cv2.imwrite(
+        str(save_dir / gt_name),
+        res,
+        [int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0],
+    )
+    assert ok, save_dir / gt_name
+
+    """
+    Note that JPEG is a lossy format: even at the highest quality setting (100),
+    the reloaded image can differ slightly from the original pixel values.
+    If you want to preserve the image exactly, save it as PNG or BMP.
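+
+    For example (an illustrative sketch only, not executed by the test suite;
+    "image.png" and the tmp.* file names are placeholders), a JPEG round-trip
+    at quality 100 usually changes some pixel values while a PNG round-trip
+    does not:
+
+        import cv2
+        import numpy as np
+
+        img = cv2.imread("image.png")
+        cv2.imwrite("tmp.png", img)
+        cv2.imwrite("tmp.jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
+        assert np.array_equal(cv2.imread("tmp.png"), img)  # PNG is lossless
+        max_diff = np.abs(cv2.imread("tmp.jpg").astype(int) - img.astype(int)).max()
+        print(max_diff)  # typically > 0, even at quality 100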
+    """
+    # gt = cv2.imread(str(current_dir / gt_name), cv2.IMREAD_UNCHANGED)
+    # assert np.array_equal(res, gt)
+
+
+def get_data(
+    fx: float = 1,
+    fy: float = 1.0,
+    img_p=current_dir / "image.png",
+    mask_p=current_dir / "mask.png",
+):
+    img = cv2.imread(str(img_p))
+    img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
+    mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE)
+    img = cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
+    mask = cv2.resize(mask, None, fx=fx, fy=fy, interpolation=cv2.INTER_NEAREST)
+    return img, mask
+
+
+def get_config(**kwargs):
+    data = dict(
+        sd_sampler=kwargs.get("sd_sampler", SDSampler.uni_pc),
+        ldm_steps=1,
+        ldm_sampler=LDMSampler.plms,
+        hd_strategy=kwargs.get("strategy", HDStrategy.ORIGINAL),
+        hd_strategy_crop_margin=32,
+        hd_strategy_crop_trigger_size=200,
+        hd_strategy_resize_limit=200,
+    )
+    data.update(**kwargs)
+    return InpaintRequest(image="", mask="", **data)
diff --git a/lama_cleaner/web_config.py b/iopaint/web_config.py
similarity index 60%
rename from lama_cleaner/web_config.py
rename to iopaint/web_config.py
index 25d53c5..948957d 100644
--- a/lama_cleaner/web_config.py
+++ b/iopaint/web_config.py
@@ -1,32 +1,43 @@
-import json
-import os
 from datetime import datetime
+from json import JSONDecodeError
 import gradio as gr
 from loguru import logger
-from lama_cleaner.const import *
+from iopaint.const import *
-_config_file = None
+
+_config_file: Path = None
+
+
+class WebConfig(ApiConfig):
+    model_dir: str = DEFAULT_MODEL_DIR
+
+
+def load_config(p: Path) -> WebConfig:
+    if p.exists():
+        with open(p, "r", encoding="utf-8") as f:
+            try:
+                return WebConfig(**{**default_configs, **json.load(f)})
+            except JSONDecodeError:
+                print("Failed to load config file, falling back to default configs")
+                return WebConfig(**default_configs)
+    else:
+        return WebConfig(**default_configs)
 def save_config(
     host,
     port,
     model,
-    sd_local_model_path,
-    sd_controlnet,
-    sd_controlnet_method,
-    device,
-    gui,
-    no_gui_auto_close,
-    no_half,
-    cpu_offload,
-    disable_nsfw,
-    sd_cpu_textencoder,
-    enable_xformers,
-    local_files_only,
     model_dir,
+    no_half,
+    low_mem,
+    cpu_offload,
+    disable_nsfw_checker,
+    local_files_only,
+    cpu_textencoder,
+    device,
     input,
     output_dir,
     quality,
@@ -42,33 +53,29 @@ def save_config(
     gfpgan_device,
     enable_restoreformer,
     restoreformer_device,
-    enable_gif,
 ):
-    config = Config(**locals())
-    print(config)
+    config = WebConfig(**locals())
+    if str(config.input) == ".":
+        config.input = None
+    if str(config.output_dir) == ".":
+        config.output_dir = None
+
+    print(config.model_dump_json(indent=4))
     if config.input and not os.path.exists(config.input):
         return "[Error] Input file or directory does not exist"
     current_time = datetime.now().strftime("%H:%M:%S")
-    msg = f"[{current_time}] Successful save config to: {os.path.abspath(_config_file)}"
+    msg = f"[{current_time}] Successfully saved config to: {str(_config_file.absolute())}"
     logger.info(msg)
     try:
         with open(_config_file, "w", encoding="utf-8") as f:
-            json.dump(config.dict(), f, indent=4, ensure_ascii=False)
+            f.write(config.model_dump_json(indent=4))
     except Exception as e:
-        return f"Save failed: {str(e)}"
+        return f"Failed to save config file: {str(e)}"
     return msg
-def close_server(*args):
-    # TODO: make close both browser and server works
-    import os, signal
-
-    pid = os.getpid()
-    os.kill(pid, signal.SIGUSR1)
-
-
-def main(config_file: str):
+def main(config_file: Path):
     global _config_file
     _config_file = config_file
@@ -76,7 +83,9 @@ def main(config_file: Path):
     with gr.Blocks() as demo:
with gr.Row(): - with gr.Column(scale=1): + with gr.Column(): + gr.Textbox(config_file, label="Config file", interactive=False) + with gr.Column(): save_btn = gr.Button(value="Save configurations") message = gr.HTML() @@ -87,10 +96,12 @@ def main(config_file: str): port = gr.Number(init_config.port, label="Port", precision=0) model = gr.Radio( - AVAILABLE_MODELS, label="Model", value=init_config.model + AVAILABLE_MODELS + DIFFUSION_MODELS, + label="Models (https://www.iopaint.com/models)", + value=init_config.model, ) device = gr.Radio( - AVAILABLE_DEVICES, label="Device", value=init_config.device + Device.values(), label="Device", value=init_config.device ) quality = gr.Slider( value=95, @@ -100,11 +111,20 @@ def main(config_file: str): step=1, ) - with gr.Column(): - gui = gr.Checkbox(init_config.gui, label=f"{GUI_HELP}") - no_gui_auto_close = gr.Checkbox( - init_config.no_gui_auto_close, label=f"{NO_GUI_AUTO_CLOSE_HELP}" - ) + no_half = gr.Checkbox(init_config.no_half, label=f"{NO_HALF_HELP}") + cpu_offload = gr.Checkbox( + init_config.cpu_offload, label=f"{CPU_OFFLOAD_HELP}" + ) + low_mem = gr.Checkbox(init_config.low_mem, label=f"{LOW_MEM_HELP}") + cpu_textencoder = gr.Checkbox( + init_config.cpu_textencoder, label=f"{CPU_TEXTENCODER_HELP}" + ) + disable_nsfw_checker = gr.Checkbox( + init_config.disable_nsfw_checker, label=f"{DISABLE_NSFW_HELP}" + ) + local_files_only = gr.Checkbox( + init_config.local_files_only, label=f"{LOCAL_FILES_ONLY_HELP}" + ) with gr.Column(): model_dir = gr.Textbox( @@ -120,19 +140,20 @@ def main(config_file: str): ) with gr.Tab("Plugins"): - enable_interactive_seg = gr.Checkbox( - init_config.enable_interactive_seg, label=INTERACTIVE_SEG_HELP - ) - interactive_seg_model = gr.Radio( - AVAILABLE_INTERACTIVE_SEG_MODELS, - label=f"Segment Anything models. {INTERACTIVE_SEG_MODEL_HELP}", - value=init_config.interactive_seg_model, - ) - interactive_seg_device = gr.Radio( - AVAILABLE_INTERACTIVE_SEG_DEVICES, - label="Segment Anything Device", - value=init_config.interactive_seg_device, - ) + with gr.Row(): + enable_interactive_seg = gr.Checkbox( + init_config.enable_interactive_seg, label=INTERACTIVE_SEG_HELP + ) + interactive_seg_model = gr.Radio( + InteractiveSegModel.values(), + label=f"Segment Anything models. 
{INTERACTIVE_SEG_MODEL_HELP}", + value=init_config.interactive_seg_model, + ) + interactive_seg_device = gr.Radio( + Device.values(), + label="Segment Anything Device", + value=init_config.interactive_seg_device, + ) with gr.Row(): enable_remove_bg = gr.Checkbox( init_config.enable_remove_bg, label=REMOVE_BG_HELP @@ -147,12 +168,12 @@ def main(config_file: str): init_config.enable_realesrgan, label=REALESRGAN_HELP ) realesrgan_device = gr.Radio( - REALESRGAN_AVAILABLE_DEVICES, + Device.values(), label="RealESRGAN Device", value=init_config.realesrgan_device, ) realesrgan_model = gr.Radio( - RealESRGANModelNameList, + RealESRGANModel.values(), label="RealESRGAN model", value=init_config.realesrgan_model, ) @@ -161,7 +182,7 @@ def main(config_file: str): init_config.enable_gfpgan, label=GFPGAN_HELP ) gfpgan_device = gr.Radio( - GFPGAN_AVAILABLE_DEVICES, + Device.values(), label="GFPGAN Device", value=init_config.gfpgan_device, ) @@ -170,40 +191,10 @@ def main(config_file: str): init_config.enable_restoreformer, label=RESTOREFORMER_HELP ) restoreformer_device = gr.Radio( - RESTOREFORMER_AVAILABLE_DEVICES, + Device.values(), label="RestoreFormer Device", value=init_config.restoreformer_device, ) - enable_gif = gr.Checkbox(init_config.enable_gif, label=GIF_HELP) - - with gr.Tab("Diffusion Model"): - sd_local_model_path = gr.Textbox( - init_config.sd_local_model_path, label=f"{SD_LOCAL_MODEL_HELP}" - ) - sd_controlnet = gr.Checkbox( - init_config.sd_controlnet, label=f"{SD_CONTROLNET_HELP}" - ) - sd_controlnet_method = gr.Radio( - SD_CONTROLNET_CHOICES, - label="ControlNet method", - value=init_config.sd_controlnet_method, - ) - no_half = gr.Checkbox(init_config.no_half, label=f"{NO_HALF_HELP}") - cpu_offload = gr.Checkbox( - init_config.cpu_offload, label=f"{CPU_OFFLOAD_HELP}" - ) - sd_cpu_textencoder = gr.Checkbox( - init_config.sd_cpu_textencoder, label=f"{SD_CPU_TEXTENCODER_HELP}" - ) - disable_nsfw = gr.Checkbox( - init_config.disable_nsfw, label=f"{DISABLE_NSFW_HELP}" - ) - enable_xformers = gr.Checkbox( - init_config.enable_xformers, label=f"{ENABLE_XFORMERS_HELP}" - ) - local_files_only = gr.Checkbox( - init_config.local_files_only, label=f"{LOCAL_FILES_ONLY_HELP}" - ) save_btn.click( save_config, @@ -211,19 +202,14 @@ def main(config_file: str): host, port, model, - sd_local_model_path, - sd_controlnet, - sd_controlnet_method, - device, - gui, - no_gui_auto_close, - no_half, - cpu_offload, - disable_nsfw, - sd_cpu_textencoder, - enable_xformers, - local_files_only, model_dir, + no_half, + low_mem, + cpu_offload, + disable_nsfw_checker, + local_files_only, + cpu_textencoder, + device, input, output_dir, quality, @@ -239,7 +225,6 @@ def main(config_file: str): gfpgan_device, enable_restoreformer, restoreformer_device, - enable_gif, ], message, ) diff --git a/lama_cleaner/__init__.py b/lama_cleaner/__init__.py deleted file mode 100644 index a1ce1a1..0000000 --- a/lama_cleaner/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -import os - -os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -import warnings - -warnings.simplefilter("ignore", UserWarning) - -from lama_cleaner.parse_args import parse_args - - -def entry_point(): - args = parse_args() - # To make os.environ["XDG_CACHE_HOME"] = args.model_cache_dir works for diffusers - # https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18 - from lama_cleaner.server import main - - main(args) diff --git a/lama_cleaner/app/.env b/lama_cleaner/app/.env deleted file mode 100644 index 
3a85ad7..0000000 --- a/lama_cleaner/app/.env +++ /dev/null @@ -1 +0,0 @@ -REACT_APP_INPAINTING_URL="" \ No newline at end of file diff --git a/lama_cleaner/app/.eslintrc.json b/lama_cleaner/app/.eslintrc.json deleted file mode 100644 index c2681ae..0000000 --- a/lama_cleaner/app/.eslintrc.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "extends": [ - "airbnb", - "airbnb/hooks", - "plugin:@typescript-eslint/recommended", - "prettier", - "plugin:prettier/recommended" - ], - "plugins": ["@typescript-eslint", "react", "react-hooks", "prettier"], - "parser": "@typescript-eslint/parser", - "parserOptions": { - "ecmaFeatures": { - "jsx": true - }, - "ecmaVersion": 2018, - "sourceType": "module", - "project": "./tsconfig.json" - }, - "rules": { - "jsx-a11y/click-events-have-key-events": 0, - "react/jsx-props-no-spreading": 0, - "import/no-unresolved": 0, - "react/jsx-no-bind": "off", - "react/jsx-filename-extension": [ - 1, - { - "extensions": [".ts", ".tsx"] - } - ], - "prettier/prettier": [ - "error", - { - "singleQuote": true, - "arrowParens": "avoid", - "endOfLine": "auto" - } - ], - "consistent-return": "off", - "no-use-before-define": "off", - "import/extensions": "off", - "react/prop-types": 0, - "react/require-default-props": "off", - "no-shadow": "off", - "@typescript-eslint/ban-ts-comment": "off", - "@typescript-eslint/no-shadow": ["error"], - "@typescript-eslint/no-explicit-any": "off", - "@typescript-eslint/explicit-function-return-type": "off", - "@typescript-eslint/explicit-module-boundary-types": "off", - "react-hooks/rules-of-hooks": "error", - "react-hooks/exhaustive-deps": "warn" - } -} diff --git a/lama_cleaner/app/.gitignore b/lama_cleaner/app/.gitignore deleted file mode 100644 index c6fdf3e..0000000 --- a/lama_cleaner/app/.gitignore +++ /dev/null @@ -1,27 +0,0 @@ -# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. - -# dependencies -/node_modules -/.pnp -.pnp.js - -# testing -/coverage - -# production - -# misc -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# Tailwind processed CSS -index.css - -.firebase diff --git a/lama_cleaner/app/.prettierrc b/lama_cleaner/app/.prettierrc deleted file mode 100644 index 2351328..0000000 --- a/lama_cleaner/app/.prettierrc +++ /dev/null @@ -1,6 +0,0 @@ -{ - "singleQuote": true, - "semi": false, - "trailingComma": "es5", - "arrowParens": "avoid" -} diff --git a/lama_cleaner/app/LICENSE b/lama_cleaner/app/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/lama_cleaner/app/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/lama_cleaner/app/build/asset-manifest.json b/lama_cleaner/app/build/asset-manifest.json deleted file mode 100644 index 0f8168c..0000000 --- a/lama_cleaner/app/build/asset-manifest.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "files": { - "main.css": "/static/css/main.ce986cc8.css", - "main.js": "/static/js/main.1fda6320.js", - "static/media/coffee-machine-lineal.gif": "/static/media/coffee-machine-lineal.ee32631219cc3986f861.gif", - "static/media/Inter.var.woff2?v=3.19": "/static/media/Inter.var.c2fe3cb2b7c746f7966a.woff2", - "static/media/Inter-italic.var.woff2?v=3.19": "/static/media/Inter-italic.var.30807be7abc48ba8c73c.woff2", - "static/media/Inter-roman.var.woff2?v=3.19": "/static/media/Inter-roman.var.ba4caefcdf5b36b438db.woff2", - "static/media/Inter-SemiBoldItalic.woff?v=3.19": "/static/media/Inter-SemiBoldItalic.463bdbfb28abad0fa6df.woff", - "static/media/Inter-BoldItalic.woff?v=3.19": "/static/media/Inter-BoldItalic.b376885042f6c961a541.woff", - "static/media/Inter-MediumItalic.woff?v=3.19": "/static/media/Inter-MediumItalic.3d0107dd43d0101274d3.woff", - "static/media/Inter-ExtraBoldItalic.woff?v=3.19": "/static/media/Inter-ExtraBoldItalic.6ab17abedc4d3f140953.woff", - "static/media/Inter-LightItalic.woff?v=3.19": "/static/media/Inter-LightItalic.ef9f65d91d2b0ba9b2e4.woff", - "static/media/Inter-ExtraLightItalic.woff?v=3.19": "/static/media/Inter-ExtraLightItalic.170dddfca278d3c2ad4a.woff", - "static/media/Inter-BlackItalic.woff?v=3.19": "/static/media/Inter-BlackItalic.ca1e738e4f349f27514d.woff", - "static/media/Inter-ThinItalic.woff?v=3.19": "/static/media/Inter-ThinItalic.bae95eb2f889c797e435.woff", - "static/media/Inter-Italic.woff?v=3.19": "/static/media/Inter-Italic.890025e726861dba417f.woff", - "static/media/Inter-Bold.woff?v=3.19": "/static/media/Inter-Bold.93c1301bd9f486c573b3.woff", - "static/media/Inter-SemiBold.woff?v=3.19": "/static/media/Inter-SemiBold.cca62d21c8c555c392e5.woff", - "static/media/Inter-ExtraBold.woff?v=3.19": "/static/media/Inter-ExtraBold.d0fa3bb2b7c9063dc594.woff", - "static/media/Inter-Medium.woff?v=3.19": "/static/media/Inter-Medium.9053572c46aeb4b16caa.woff", - "static/media/Inter-ExtraLight.woff?v=3.19": "/static/media/Inter-ExtraLight.c4248615291a9e8f1fb7.woff", - "static/media/Inter-Light.woff?v=3.19": "/static/media/Inter-Light.994e34451cc19ede31d3.woff", - "static/media/Inter-Black.woff?v=3.19": "/static/media/Inter-Black.c6938660eec019fefd68.woff", - "static/media/Inter-Thin.woff?v=3.19": "/static/media/Inter-Thin.29b9c616a95a912abf73.woff", - "static/media/Inter-Regular.woff?v=3.19": "/static/media/Inter-Regular.8c206db99195777c6769.woff", - "static/media/Inter-MediumItalic.woff2?v=3.19": "/static/media/Inter-MediumItalic.085cb93e613ba3d40d2b.woff2", - "static/media/Inter-SemiBoldItalic.woff2?v=3.19": "/static/media/Inter-SemiBoldItalic.d9467ee321a8f38aefff.woff2", - "static/media/Inter-BoldItalic.woff2?v=3.19": "/static/media/Inter-BoldItalic.2d26c56a606662486796.woff2", - "static/media/Inter-ExtraBoldItalic.woff2?v=3.19": "/static/media/Inter-ExtraBoldItalic.535a6cf662596b3bd6a6.woff2", - "static/media/Inter-ExtraLightItalic.woff2?v=3.19": "/static/media/Inter-ExtraLightItalic.5c7d7d6deb1d2ec8d48c.woff2", - "static/media/Inter-LightItalic.woff2?v=3.19": "/static/media/Inter-LightItalic.f86952265d7b0f02c921.woff2", - "static/media/Inter-BlackItalic.woff2?v=3.19": "/static/media/Inter-BlackItalic.cb2a7335650c690077fe.woff2", - "static/media/Inter-Italic.woff2?v=3.19": "/static/media/Inter-Italic.cb10ffd7684cd9836a05.woff2", - 
"static/media/Inter-ThinItalic.woff2?v=3.19": "/static/media/Inter-ThinItalic.bf213704dce6b437ede4.woff2", - "static/media/Inter-Bold.woff2?v=3.19": "/static/media/Inter-Bold.ec64ea577b0349e055ad.woff2", - "static/media/Inter-ExtraBold.woff2?v=3.19": "/static/media/Inter-ExtraBold.cbe0ae49c52c920fd563.woff2", - "static/media/Inter-Medium.woff2?v=3.19": "/static/media/Inter-Medium.293fd13dbca5a3e450ef.woff2", - "static/media/Inter-SemiBold.woff2?v=3.19": "/static/media/Inter-SemiBold.b5f0f109bc88052d4000.woff2", - "static/media/Inter-Light.woff2?v=3.19": "/static/media/Inter-Light.2d5198822ab091ce4305.woff2", - "static/media/Inter-ExtraLight.woff2?v=3.19": "/static/media/Inter-ExtraLight.72505e6a122c6acd5471.woff2", - "static/media/Inter-Black.woff2?v=3.19": "/static/media/Inter-Black.15ca31c0a2a68f76d2d1.woff2", - "static/media/Inter-Thin.woff2?v=3.19": "/static/media/Inter-Thin.fff2a096db014f6239d4.woff2", - "static/media/Inter-Regular.woff2?v=3.19": "/static/media/Inter-Regular.c8ba52b05a9ef10f4758.woff2", - "index.html": "/index.html" - }, - "entrypoints": [ - "static/css/main.ce986cc8.css", - "static/js/main.1fda6320.js" - ] -} \ No newline at end of file diff --git a/lama_cleaner/app/build/favicon.ico b/lama_cleaner/app/build/favicon.ico deleted file mode 100644 index f5607e2..0000000 Binary files a/lama_cleaner/app/build/favicon.ico and /dev/null differ diff --git a/lama_cleaner/app/build/index.html b/lama_cleaner/app/build/index.html deleted file mode 100644 index 704b475..0000000 --- a/lama_cleaner/app/build/index.html +++ /dev/null @@ -1 +0,0 @@ -lama-cleaner - Image inpainting powered by SOTA AI model
\ No newline at end of file diff --git a/lama_cleaner/app/build/static/css/main.ce986cc8.css b/lama_cleaner/app/build/static/css/main.ce986cc8.css deleted file mode 100644 index aeb198a..0000000 --- a/lama_cleaner/app/build/static/css/main.ce986cc8.css +++ /dev/null @@ -1 +0,0 @@ -:root{--blackA1:rgba(0,0,0,.012);--blackA2:rgba(0,0,0,.027);--blackA3:rgba(0,0,0,.047);--blackA4:rgba(0,0,0,.071);--blackA5:rgba(0,0,0,.09);--blackA6:rgba(0,0,0,.114);--blackA7:rgba(0,0,0,.141);--blackA8:rgba(0,0,0,.22);--blackA9:rgba(0,0,0,.439);--blackA10:rgba(0,0,0,.478);--blackA11:rgba(0,0,0,.565);--blackA12:rgba(0,0,0,.91);--mauve1:#fdfcfd;--mauve2:#f9f8f9;--mauve3:#f4f2f4;--mauve4:#eeedef;--mauve5:#e9e8ea;--mauve6:#e4e2e4;--mauve7:#dcdbdd;--mauve8:#c8c7cb;--mauve9:#908e96;--mauve10:#86848d;--mauve11:#6f6e77;--mauve12:#1a1523;--violet1:#fdfcfe;--violet2:#fbfaff;--violet3:#f5f2ff;--violet4:#ede9fe;--violet5:#e4defc;--violet6:#d7cff9;--violet7:#c4b8f3;--violet8:#aa99ec;--violet9:#6e56cf;--violet10:#644fc1;--violet11:#5746af;--violet12:#20134b;--page-bg:#fff;--page-bg-light:hsla(0,0%,100%,.5);--page-text-color:#040404;--yellow-accent:#fc0;--yellow-accent-light:#ffcc0055;--link-color:#000;--border-color:#eff1f4;--border-color-light:hsla(240,9%,43%,.5);--tooltip-bg:#e6e6ea;--tooltip-text-color:#000;--error-color:#ef4444;--success-color:#10b981;--editor-toolkit-bg:hsla(0,0%,100%,.5);--editor-options-bg:#e6e6ea;--options-text-color:var(--page-text-color);--editor-size-border-color:var(--border-color);--editor-toolkit-panel-border:0;--modal-bg:var(--page-bg);--modal-text-color:#000;--modal-hotkey-border-color:#000;--model-mask-bg:rgba(209,213,219,.4);--text-color:#040404;--text-color-gray:#6b6f76;--text-color-disabled:#6b6f76;--btn-text-color:var(--text-color);--btn-text-hover-color:#040404;--btn-border-color:#646478;--btn-primary-hover-bg:var(--yellow-accent);--animation-pulsing-bg:hsla(0,0%,100%,.5);--switch-root-background-color:#dfe1e4;--switch-thumb-color:var(--page-bg);--switch-thumb-checked-color:var(--page-bg);--slider-background-color:var(--switch-root-background-color);--tooltip-bg:var(--page-bg);--badge-background-color:#f1f3f5;--badge-color:#687076;--box-shadow:inset 0 0.5px hsla(0,0%,100%,.1),inset 0 1px 5px #f8f9fa,0px 0px 0px 0.5px #c1c8cd,0px 2px 1px -1px #c1c8cd,0 1px #c1c8cd;--croper-bg:rgba(0,0,0,.5);--tabs-active-color:#f0f3f9}[data-theme=dark]{--page-bg:#040404;--page-bg-light:#04040488;--page-text-color:#f9f9f9;--yellow-accent:#fc0;--yellow-accent-light:#ffcc0055;--link-color:var(--yellow-accent);--border-color:#1e1e1e;--border-color-light:#666;--tooltip-bg:#212121;--tooltip-text-color:#d2d2d2;--editor-toolkit-bg:rgba(0,0,0,.5);--editor-options-bg:#212121;--options-text-color:var(--page-text-color);--editor-size-border-color:var(--yellow-accent);--editor-toolkit-panel-border:1px solid hsla(240,9%,43%,.4);--modal-bg:var(--page-bg);--modal-text-color:var(--page-text-color);--modal-hotkey-border-color:var(--page-text-color);--model-mask-bg:rgba(76,76,87,.4);--text-color:#fff;--text-color-gray:#c3c4c6;--text-color-disabled:#6b6f76;--btn-text-color:var(--text-color);--btn-text-hover-color:var(--page-bg);--btn-border-color:var(--yellow-accent);--btn-primary-hover-bg:var(--yellow-accent);--animation-pulsing-bg:#f0f0ff;--switch-root-background-color:#3c3f44;--switch-thumb-color:#1f2023;--switch-thumb-checked-color:#fff;--slider-background-color:var(--switch-root-background-color);--badge-background-color:#202425;--badge-color:#9ba1a6;--box-shadow:inset 0 0.5px hsla(0,0%,100%,.1),inset 0 1px 5px 
#1a1d1e,0px 0px 0px 0.5px #4c5155,0px 2px 1px -1px #4c5155,0 1px #4c5155;--croper-bg:rgba(0,0,0,.5);--tabs-active-color:#272831}@supports (color:hsl(0 0% 0%/0)){[data-theme=dark]{--tooltip-bg:#202425}}@-webkit-keyframes pulsing{0%{opacity:1}50%{background-color:hsla(0,0%,100%,.5);background-color:var(--animation-pulsing-bg);opacity:.75}to{opacity:1}}@keyframes pulsing{0%{opacity:1}50%{background-color:hsla(0,0%,100%,.5);background-color:var(--animation-pulsing-bg);opacity:.75}to{opacity:1}}@-webkit-keyframes opacityReveal{0%{opacity:0}to{opacity:1}}@keyframes opacityReveal{0%{opacity:0}to{opacity:1}}@-webkit-keyframes slideDown{0%{-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@keyframes slideDown{0%{-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideUp{0%{-webkit-transform:translateY(100%);transform:translateY(100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@keyframes slideUp{0%{-webkit-transform:translateY(100%);transform:translateY(100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideIn{0%{-webkit-transform:translateX(calc(100% + 25px));transform:translateX(calc(100% + 25px))}to{-webkit-transform:translateX(0);transform:translateX(0)}}@keyframes slideIn{0%{-webkit-transform:translateX(calc(100% + 25px));transform:translateX(calc(100% + 25px))}to{-webkit-transform:translateX(0);transform:translateX(0)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@-webkit-keyframes slideUpAndFade{0%{opacity:0;-webkit-transform:translateY(2px);transform:translateY(2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideDownAndFade{0%{opacity:0;-webkit-transform:translateY(-2px);transform:translateY(-2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}.lama-cleaner{background-color:#fff;background-color:var(--page-bg);color:#040404;color:var(--page-text-color);display:grid;grid-template-areas:"main-content";height:100vh;transition-duration:.2s;transition-property:background-color,color;transition-timing-function:repeat(2,ease-out);width:100vw}a{color:inherit;text-decoration:inherit}input:disabled{color:#6b6f76;color:var(--text-color-gray)}.editor-container{align-items:center;display:flex;height:100vh;justify-content:center;width:100vw}.react-transform-wrapper{display:grid!important;height:100%!important;width:100%!important}.editor-canvas-container{grid-row-gap:1rem;display:grid;grid-template-areas:"editor-content";row-gap:1rem}.editor-canvas{grid-area:editor-content;z-index:2}.original-image-container{display:grid;grid-area:editor-content;grid-template-areas:"original-image-content";pointer-events:none}.original-image-container img{grid-area:original-image-content}.original-image-container .editor-slider{background-color:#fc0;background-color:var(--yellow-accent);grid-area:original-image-content;height:100%;justify-self:end;transition:all .3s cubic-bezier(.4,0,.2,1);width:6px;z-index:2}.editor-canvas-loading{-webkit-animation:pulsing .75s infinite;animation:pulsing .75s infinite;pointer-events:none}.editor-toolkit-panel{align-items:center;-webkit-animation:slideUp .2s 
ease-out;animation:slideUp .2s ease-out;-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);background-color:hsla(0,0%,100%,.5);background-color:var(--page-bg-light);border:0;border:var(--editor-toolkit-panel-border);border-radius:3rem;bottom:.5rem;box-shadow:0 0 0 1px rgba(0,0,0,.102),0 3px 16px rgba(0,0,0,.078),0 2px 6px 1px rgba(0,0,0,.09);display:flex;gap:16px;justify-content:center;padding:.4rem 24px;position:fixed}@media screen and (max-width:767px){.editor-toolkit-panel{grid-template-areas:"toolkit-size-selector toolkit-size-selector" "toolkit-brush-slider toolkit-brush-slider" "toolkit-btns toolkit-btns";justify-items:center;padding:1rem 2rem;row-gap:2rem}}.editor-toolkit-panel .eyeicon-active{background-color:#fc0;background-color:var(--yellow-accent);color:#040404;color:var(--btn-text-hover-color)}.editor-brush-slider{grid-column-gap:1rem;align-items:center;-webkit-column-gap:1rem;column-gap:1rem;display:grid;grid-area:toolkit-brush-slider;grid-template-columns:repeat(2,-webkit-max-content);grid-template-columns:repeat(2,max-content);height:-webkit-max-content;height:max-content;-webkit-user-select:none;user-select:none}.editor-brush-slider input[type=range]{-webkit-appearance:none;appearance:none;background:transparent;border-color:transparent;color:transparent;cursor:pointer;width:100%}.editor-brush-slider input[type=range]:focus{outline:none}.editor-brush-slider input[type=range]::-webkit-slider-thumb{-webkit-appearance:none;background:#fc0;background:var(--yellow-accent);border:1px solid #000;border-radius:50%;height:1.2rem;margin-top:-.5rem;width:1.2rem;z-index:2}.editor-brush-slider input[type=range]::-webkit-slider-runnable-track{background:#dfe1e4;background:var(--slider-background-color);border-radius:2rem;height:.2rem}.editor-brush-slider input[type=range]::-moz-range-track{background:#dfe1e4;background:var(--slider-background-color);border-radius:2rem}.editor-brush-slider input[type=range]::-moz-range-progress{background:#fc0;background:var(--yellow-accent)}.editor-toolkit-btns{display:flex;gap:12px}.brush-shape{background-color:rgba(255,204,0,.733);border:1px solid #fc0;border:1px solid var(--yellow-accent);border-radius:50%;pointer-events:none;position:absolute}.file-manager-modal{color:#040404;color:var(--text-color);height:90%;width:80%}.react-photo-album.react-photo-album--columns{height:80vh}.react-photo-album--photo{border:1px solid transparent;border-radius:8px;transition:visibility .25s ease-in,-webkit-transform .25s;transition:transform .25s,visibility .25s ease-in;transition:transform .25s,visibility .25s ease-in,-webkit-transform .25s;-webkit-user-select:none;user-select:none}.react-photo-album--photo:hover{border:1px solid #eff1f4;border:1px solid var(--border-color);-webkit-transform:scale(1.03);transform:scale(1.03)}.ScrollAreaRoot{--scrollbar-size:10px;border-radius:4px;overflow:hidden}.ScrollAreaViewport{border-radius:inherit;height:100%;width:100%}.ScrollAreaScrollbar{display:flex;padding:2px;touch-action:none;transition:background .16s ease-out;-webkit-user-select:none;user-select:none}.ScrollAreaScrollbar:hover{background:var(--blackA8)}.ScrollAreaScrollbar[data-orientation=vertical]{width:var(--scrollbar-size)}.ScrollAreaScrollbar[data-orientation=horizontal]{flex-direction:column;height:var(--scrollbar-size)}.ScrollAreaThumb{background:var(--mauve10);border-radius:var(--scrollbar-size);flex:1 
1;position:relative}.ScrollAreaThumb:before{content:"";height:100%;left:50%;min-height:44px;min-width:44px;position:absolute;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);width:100%}.ScrollAreaCorner{background:var(--blackA8)}.file-search-input{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:12px;height:32px;padding-left:30px;width:250px}.sort-btn-inactive svg{opacity:.5}button,fieldset,input{all:unset}.TabsRoot{align-self:flex-start;background-color:#fff;background-color:var(--page-bg);display:flex;flex-direction:column;gap:8px}.TabsList{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:12px;flex-direction:row;gap:6px;padding:4px}.TabsList,.TabsTrigger{background-color:#fff;background-color:var(--page-bg);display:flex;justify-content:flex-start}.TabsTrigger{align-items:center;border-radius:8px;color:#000;color:var(--modal-text-color);font-family:inherit;font-size:15px;line-height:1;padding:8px;-webkit-user-select:none;user-select:none}.TabsTrigger:hover,.TabsTrigger[data-state=active]{background-color:#f0f3f9;background-color:var(--tabs-active-color)}.TabsTrigger:focus{position:relative}.TabsContent{background-color:#fff;background-color:var(--page-bg);outline:none;width:100%}.TabsContent[data-state=active]{display:flex;flex-direction:column;gap:14px}.landing-page{grid-row-gap:2rem;display:grid;grid-auto-rows:-webkit-max-content;grid-auto-rows:max-content;justify-items:center;place-self:center;row-gap:2rem}@media screen and (max-width:767px){.landing-page{padding:1rem}}.landing-page h1{font-size:1.4rem;text-align:center}@media screen and (max-width:767px){.landing-page h1{font-size:1.2rem}}.landing-page a{color:#000;color:var(--link-color)}.landing-file-selector{display:grid}header{align-items:center;-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);background-color:hsla(0,0%,100%,.5);background-color:var(--page-bg-light);border-bottom:1px solid hsla(240,9%,43%,.2);display:flex;height:60px;justify-content:space-between;padding:1rem 1.5rem;position:absolute;top:0;width:100%;z-index:20}.shortcuts{z-index:1}.header-icons-wrapper{gap:12px}.header-icons,.header-icons-wrapper{align-items:center;display:flex;justify-content:center;justify-self:end}.header-icons{gap:6px}.mask-preview{margin-left:20px;margin-top:30px;max-height:400px;max-width:400px}.imageSize,.mask-preview{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:8px}.imageSize{background-color:#fff;background-color:var(--page-bg);padding:8px;z-index:4}.prompt-wrapper{display:flex;gap:12px}.prompt-wrapper input{all:unset;border-radius:.5rem;border-width:0;min-width:600px;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:0 .8rem}.prompt-wrapper input:focus-visible{border-width:0;outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.theme-toggle-ui{transition:all .2s ease-in;-webkit-user-select:none;user-select:none;z-index:10}.theme-toggle-ui .theme-btn{align-items:center;cursor:pointer;display:flex;justify-content:center;outline:none}.theme-toggle-ui .theme-btn svg{height:22px;width:22px}.modal-shortcuts{background-color:#fff;background-color:var(--modal-bg);box-shadow:0 0 20px rgba(0,0,40,.2);color:#000;color:var(--modal-text-color);grid-area:main-content}@media screen and (max-width:767px){.modal-shortcuts{-webkit-animation:slideDown .2s ease-out;animation:slideDown .2s 
ease-out;display:grid;height:auto;width:100%}}.shortcut-options{display:flex;flex-direction:row;gap:48px}.shortcut-options .shortcut-option{grid-column-gap:2rem;align-items:center;-webkit-column-gap:2rem;column-gap:2rem;display:grid;grid-template-columns:repeat(2,auto)}@media screen and (max-width:767px){.shortcut-options .shortcut-option{-webkit-column-gap:0;column-gap:0;row-gap:.6rem}}.shortcut-options .shortcut-key{background-color:#fff;background-color:var(--page-bg);border-radius:6px;box-shadow:inset 0 .5px hsla(0,0%,100%,.1),inset 0 1px 5px #f8f9fa,0 0 0 .5px #c1c8cd,0 2px 1px -1px #c1c8cd,0 1px #c1c8cd;box-shadow:var(--box-shadow);box-sizing:border-box;color:#000;color:var(--modal-text-color);font-family:inherit;font-weight:400;justify-self:end;line-height:1.5;padding-left:.5rem;padding-right:.5rem;text-shadow:0 0 1px hsla(0,0%,100%,.5);-webkit-user-select:none;user-select:none;white-space:nowrap;width:-webkit-max-content;width:max-content}@media screen and (max-width:767px){.shortcut-options .shortcut-key{padding:.2rem .4rem}}.shortcut-options .shortcut-description{font-size:.95rem;justify-self:start;text-align:left}@media screen and (max-width:767px){.shortcut-options .shortcut-description{justify-self:start;text-align:left;width:auto}}.shortcut-options-column{gap:12px;width:320px}.setting-block,.setting-block .option-desc,.shortcut-options-column{display:flex;flex-direction:column}.setting-block .option-desc{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:.3rem;color:#6b6f76;color:var(--text-color-gray);gap:8px;margin-top:12px;padding:1rem}.setting-block .option-desc .sub-setting-block{color:#040404;color:var(--text-color)}.setting-block .option-desc svg{color:#6b6f76;color:var(--text-color-gray)}.setting-block-content{align-items:center;display:flex;gap:12rem;justify-content:space-between}.setting-block-content-v{align-items:flex-start;display:flex;flex-direction:column;gap:1rem;justify-content:flex-start}.setting-block-content-title{align-items:center;display:flex;flex-direction:row;gap:8px;justify-content:center}.setting-block-desc{color:#6b6f76;color:var(--text-color-gray);font-size:1rem;margin-top:8px}.hd-setting-block .inline-tip{color:#040404;color:var(--text-color);cursor:pointer;display:inline}.model-desc-link{border-radius:999px;color:#687076;color:var(--badge-color);display:flex;justify-items:center;padding-left:5px;padding-right:5px;text-decoration:none}.modal-setting{background-color:#fff;background-color:var(--modal-bg);box-shadow:0 0 20px rgba(0,0,40,.2);color:#000;color:var(--modal-text-color);width:680px}@media screen and (max-width:767px){.modal-setting{-webkit-animation:slideDown .2s ease-out;animation:slideDown .2s ease-out;display:grid;height:auto;margin-top:-11rem;width:100%}}.folder-path-block{display:flex;flex-direction:column;gap:12px}.folder-path{border-radius:6px;border-width:0;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:.3rem .5rem;width:95%}.folder-path:focus-visible{border-width:0;outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.side-panel{background-color:#fff;background-color:var(--page-bg);border-color:#eff1f4;border-color:var(--border-color);border-radius:.8rem;border-style:solid;border-width:1px;padding:.1rem 
.3rem;position:absolute;right:1.5rem;top:68px;z-index:4}.side-panel-trigger{border:0;font-size:16px}.side-panel-content{background-color:#fff;background-color:var(--page-bg);border-color:#eff1f4;border-color:var(--border-color);border-radius:.8rem;border-style:solid;border-width:1px;color:#040404;color:var(--text-color);display:flex;flex-direction:column;font-size:14px;gap:12px;outline:none;padding:1rem;position:relative;right:1.5rem;top:8px;z-index:9}.side-panel-content .setting-block-content{gap:1rem}.negative-prompt{all:unset;border-radius:.5rem;border-width:0;max-width:200px;min-height:150px;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:12px .8rem;width:100%}.negative-prompt:focus-visible{border-width:0;outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.negative-prompt:-webkit-input-placeholder{padding-top:10px}.negative-prompt:-moz-input-placeholder{padding-top:10px}.negative-prompt:-ms-input-placeholder{padding-top:10px}.resize-title-tile{color:#6b6f76;color:var(--text-color-gray);font-size:.5rem;width:86px}.plugins{background-color:#fff;background-color:var(--page-bg);border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:8px;z-index:4}.DropdownMenuContent,.DropdownMenuSubContent{-webkit-animation-duration:.4s;animation-duration:.4s;-webkit-animation-timing-function:cubic-bezier(.16,1,.3,1);animation-timing-function:cubic-bezier(.16,1,.3,1);background-color:#fff;background-color:var(--page-bg);border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:6px;box-shadow:0 10px 38px -10px rgba(22,23,24,.35),0 10px 20px -15px rgba(22,23,24,.2);min-width:80px;padding:5px;will-change:transform,opacity}.DropdownMenuContent[data-side=top],.DropdownMenuSubContent[data-side=top]{-webkit-animation-name:slideDownAndFade;animation-name:slideDownAndFade}.DropdownMenuContent[data-side=right],.DropdownMenuSubContent[data-side=right]{-webkit-animation-name:slideLeftAndFade;animation-name:slideLeftAndFade}.DropdownMenuContent[data-side=bottom],.DropdownMenuSubContent[data-side=bottom]{-webkit-animation-name:slideUpAndFade;animation-name:slideUpAndFade}.DropdownMenuContent[data-side=left],.DropdownMenuSubContent[data-side=left]{-webkit-animation-name:slideRightAndFade;animation-name:slideRightAndFade}.DropdownMenuCheckboxItem,.DropdownMenuItem,.DropdownMenuRadioItem,.DropdownMenuSubTrigger{align-items:center;border-radius:3px;color:#040404;color:var(--btn-text-color);display:flex;font-size:13px;gap:8px;height:32px;line-height:1;outline:none;padding:0 5px;position:relative;-webkit-user-select:none;user-select:none}.DropdownMenuSubTrigger[data-state=open]{background-color:#fff;background-color:var(--page-bg);color:#040404;color:var(--btn-text-color)}.DropdownMenuCheckboxItem[data-disabled],.DropdownMenuItem[data-disabled],.DropdownMenuRadioItem[data-disabled],.DropdownMenuSubTrigger[data-disabled]{color:#6b6f76;color:var(--text-color-disabled);pointer-events:none}.DropdownMenuCheckboxItem[data-highlighted],.DropdownMenuItem[data-highlighted],.DropdownMenuRadioItem[data-highlighted],.DropdownMenuSubTrigger[data-highlighted]{background-color:#fc0;background-color:var(--yellow-accent);color:#040404;color:var(--btn-text-hover-color)}.RightSlot{align-items:center;color:#040404;color:var(--btn-text-color);display:flex;margin-left:auto;padding-left:10px}[data-highlighted]>.RightSlot{color:#040404;color:var(--btn-text-hover-color)}[data-disabled] .RightSlot{color:#6b6f76;color:var(--text-color-gray)}@keyframes 
slideUpAndFade{0%{opacity:0;-webkit-transform:translateY(2px);transform:translateY(2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideRightAndFade{0%{opacity:0;-webkit-transform:translateX(-2px);transform:translateX(-2px)}to{opacity:1;-webkit-transform:translateX(0);transform:translateX(0)}}@keyframes slideRightAndFade{0%{opacity:0;-webkit-transform:translateX(-2px);transform:translateX(-2px)}to{opacity:1;-webkit-transform:translateX(0);transform:translateX(0)}}@keyframes slideDownAndFade{0%{opacity:0;-webkit-transform:translateY(-2px);transform:translateY(-2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideLeftAndFade{0%{opacity:0;-webkit-transform:translateX(2px);transform:translateX(2px)}to{opacity:1;-webkit-transform:translateX(0);transform:translateX(0)}}@keyframes slideLeftAndFade{0%{opacity:0;-webkit-transform:translateX(2px);transform:translateX(2px)}to{opacity:1;-webkit-transform:translateX(0);transform:translateX(0)}}.crop-border{outline-color:#fc0;outline-color:var(--yellow-accent);outline-style:dashed}.info-bar{align-items:center;background-color:#fff;background-color:var(--page-bg);border:0;border:var(--editor-toolkit-panel-border);border-radius:9999px;box-shadow:0 0 0 1px rgba(0,0,0,.102),0 3px 16px rgba(0,0,0,.078),0 2px 6px 1px rgba(0,0,0,.09);color:#040404;color:var(--text-color);display:flex;font-size:1rem;gap:12px;justify-content:center;padding:.2rem .8rem;pointer-events:auto;position:absolute}.info-bar:hover{cursor:move}.croper-wrapper{height:100%;overflow:hidden;position:absolute;width:100%}.croper,.croper-wrapper{pointer-events:none;z-index:2}.croper{bottom:0;box-shadow:0 0 0 9999px rgba(0,0,0,.5);left:0;position:relative;right:0;top:0}.drag-bar{pointer-events:auto;position:absolute}.drag-bar.ord-top{cursor:ns-resize;height:12px;left:0;margin-top:-6px;top:0;width:100%}.drag-bar.ord-right{cursor:ew-resize;height:100%;margin-right:-6px;right:0;top:0;width:12px}.drag-bar.ord-bottom{bottom:0;cursor:ns-resize;height:12px;left:0;margin-bottom:-6px;width:100%}.drag-bar.ord-left{cursor:ew-resize;height:100%;left:0;margin-left:-6px;top:0;width:12px}.drag-handle{background-color:#ffcc0055;background-color:var(--yellow-accent-light);border:2px solid #fc0;border:2px solid var(--yellow-accent);content:"";display:block;height:12px;pointer-events:auto;position:absolute;width:12px;z-index:4}.drag-handle:hover{background-color:#fc0;background-color:var(--yellow-accent)}.drag-handle.ord-topleft{cursor:nw-resize;left:-7px;top:-7px}.drag-handle.ord-topright{cursor:ne-resize;right:-7px;top:-7px}.drag-handle.ord-bottomright{bottom:-7px;cursor:se-resize;right:-7px}.drag-handle.ord-bottomleft{bottom:-7px;cursor:sw-resize;left:-7px}.drag-handle.ord-bottom,.drag-handle.ord-top{cursor:ns-resize;left:calc(50% - 6px)}.drag-handle.ord-top{top:-7px}.drag-handle.ord-bottom{bottom:-7px}.drag-handle.ord-left,.drag-handle.ord-right{cursor:ew-resize;top:calc(50% - 6px)}.drag-handle.ord-left{left:-7px}.drag-handle.ord-right{right:-7px}.interactive-seg-wrapper{height:100%;overflow:hidden;pointer-events:none;position:absolute;width:100%;z-index:2}.interactive-seg-wrapper .click-item{border-radius:50%;height:8px;position:absolute;width:8px}.interactive-seg-wrapper .click-item-positive{background-color:rgba(21,215,121,.936);outline:6px solid rgba(98,255,179,.31)}.interactive-seg-wrapper .click-item-negative{background-color:rgba(237,49,55,.942);outline:6px solid 
rgba(255,89,95,.31)}.interactive-seg-confirm-actions{background-color:#fff;background-color:var(--page-bg);border-color:#eff1f4;border-color:var(--border-color);border-radius:16px;border-style:solid;border-width:1px;padding:8px;position:absolute;top:68px;z-index:5}.interactive-seg-confirm-actions .action-buttons{align-items:center;display:flex;gap:8px;justify-content:center}@-webkit-keyframes pulse{to{box-shadow:0 0 0 14px rgba(21,215,121,0)}}@keyframes pulse{to{box-shadow:0 0 0 14px rgba(21,215,121,0)}}.interactive-seg-cursor{-webkit-animation:pulse 1.5s cubic-bezier(.66,0,0,1) infinite;animation:pulse 1.5s cubic-bezier(.66,0,0,1) infinite;background-color:rgba(21,215,121,.936);border-radius:50%;box-shadow:0 0 0 0 rgba(21,215,121,.936);color:rgba(234,255,240,.98);height:20px;pointer-events:none;position:absolute;width:20px}.ProgressWrapper{align-items:center;border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:14px;display:flex;gap:18px;height:32px;justify-content:center;left:50%;padding-left:8px;padding-right:8px;position:fixed;top:68px;-webkit-transform:translateX(-50%);transform:translateX(-50%)}.ProgressRoot{background:#fff;background:var(--page-bg);border-radius:99999px;height:10px;overflow:hidden;position:relative;-webkit-transform:translateZ(0);transform:translateZ(0);width:130px}.ProgressIndicator{background-color:#fc0;background-color:var(--yellow-accent);height:100%;transition:-webkit-transform 60ms cubic-bezier(.65,0,.35,1);transition:transform 60ms cubic-bezier(.65,0,.35,1);transition:transform 60ms cubic-bezier(.65,0,.35,1),-webkit-transform 60ms cubic-bezier(.65,0,.35,1);width:100%}.file-select-label{border:2px dashed #eff1f4;border:2px dashed var(--border-color);border-radius:.5rem;cursor:pointer;display:grid;min-width:600px}@media screen and (max-width:767px){.file-select-label{min-width:300px}}.file-select-label .file-select-label-hover,.file-select-label:hover{background-color:#fc0;background-color:var(--yellow-accent);color:#000}.file-select-container{display:grid;height:100%;padding:4rem;width:100%}.file-select-container input{display:none}.file-select-message{text-align:center}.btn-primary{grid-column-gap:1rem;background-color:#fff;background-color:var(--page-bg);border-radius:.5rem;color:#040404;color:var(--btn-text-color);-webkit-column-gap:1rem;column-gap:1rem;cursor:pointer;display:grid;grid-auto-flow:column;padding:.5rem;place-items:center;-webkit-user-select:none;user-select:none;width:-webkit-max-content;width:max-content;z-index:1}.btn-primary:hover{background-color:#fc0;background-color:var(--btn-primary-hover-bg);color:#040404;color:var(--btn-text-hover-color)}.btn-primary svg{height:auto;width:20px}.btn-primary-disabled{background-color:#fff;background-color:var(--page-bg);opacity:.5;pointer-events:none;-webkit-user-select:none;user-select:none}.btn-border{border-color:#646478;border-color:var(--btn-border-color);border-style:solid;border-width:1px}.modal-mask{-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);background-color:rgba(209,213,219,.4);background-color:var(--model-mask-bg);inset:0;position:fixed;z-index:9998}@media(prefers-reduced-motion:no-preference){.modal-mask{-webkit-animation:opacityReveal .15s cubic-bezier(.16,1,.3,1) forwards;animation:opacityReveal .15s cubic-bezier(.16,1,.3,1) forwards}}@-webkit-keyframes contentShow{0%{opacity:0;-webkit-transform:translate(-50%,-48%) scale(.96);transform:translate(-50%,-48%) scale(.96)}to{opacity:1;-webkit-transform:translate(-50%,-50%) 
scale(1);transform:translate(-50%,-50%) scale(1)}}@keyframes contentShow{0%{opacity:0;-webkit-transform:translate(-50%,-48%) scale(.96);transform:translate(-50%,-48%) scale(.96)}to{opacity:1;-webkit-transform:translate(-50%,-50%) scale(1);transform:translate(-50%,-50%) scale(1)}}.modal{background-color:#fff;background-color:var(--page-bg);border-radius:.95rem;display:flex;flex-direction:column;gap:16px;left:50%;padding:25px;place-self:center;position:fixed;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);z-index:9999}.modal:focus{outline:none}.modal .modal-header{align-items:center;display:grid;grid-template-columns:repeat(2,auto)}.modal .modal-header .btn-primary{justify-self:end}@media(prefers-reduced-motion:no-preference){.modal{-webkit-animation:contentShow .15s cubic-bezier(.16,1,.3,1) forwards;animation:contentShow .15s cubic-bezier(.16,1,.3,1) forwards}}.select-trigger{all:unset;align-items:center;background-color:#fff;background-color:var(--page-bg);border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:.5rem;color:#040404;color:var(--options-text-color);display:inline-flex;gap:8px;height:32px;justify-content:space-between;padding:0 .8rem}.select-trigger svg{height:1rem;margin-top:.25rem;width:1rem}.select-trigger:hover{border-color:#fc0;border-color:var(--yellow-accent)}.select-trigger:disabled{border-color:#eff1f4;border-color:var(--border-color);color:#eff1f4;color:var(--border-color)}.select-content{background-color:#fff;background-color:var(--page-bg);border-radius:.5rem;overflow:hidden}.select-viewport{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:.5rem;padding:5px}.select-item{all:unset;align-items:center;background-color:#fff;background-color:var(--page-bg);border-radius:.5rem;color:#040404;color:var(--options-text-color);display:flex;padding:6px 6px 6px 25px;position:relative;-webkit-user-select:none;user-select:none}.select-item:focus{background-color:#fc0;background-color:var(--yellow-accent);color:#040404;color:var(--btn-text-hover-color)}.select-item-indicator{align-items:center;display:inline-flex;justify-content:center;left:0;padding-right:4px;position:absolute;width:25px}.switch-root{-webkit-tap-highlight-color:rgba(0,0,0,0);all:"unset";background-color:#dfe1e4;background-color:var(--switch-root-background-color);border:none;border-radius:9999px;height:25px;position:relative;transition:background-color .1s;width:42px}.switch-root:focus-visible{outline:none}.switch-root[data-state=checked]{background-color:#fc0;background-color:var(--yellow-accent)}.switch-thumb{background-color:#fff;background-color:var(--switch-thumb-color);border-radius:9999px;display:block;height:17px;-webkit-transform:translateX(4px);transform:translateX(4px);transition:-webkit-transform .1s;transition:transform .1s;transition:transform .1s,-webkit-transform .1s;width:17px;will-change:transform}.switch-thumb[data-state=checked]{background-color:#fff;background-color:var(--switch-thumb-checked-color);outline:1px solid hsla(240,9%,43%,.5);-webkit-transform:translateX(21px);transform:translateX(21px)}.number-input{all:unset;border-radius:.5rem;flex:1 0 auto;height:32px;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:0 .8rem;text-align:right}.number-input:focus-visible{outline:1px solid #fc0;outline:1px solid 
var(--yellow-accent)}.number-input:disabled{color:#eff1f4;color:var(--border-color)}.toast-viewpoint{bottom:48px;display:flex;flex-direction:row;gap:10px;margin:0;max-width:100vw;padding:25px;position:fixed;right:1.5rem;z-index:999999}.toast-viewpoint:focus-visible{outline:none}.toast-root{align-items:center;background-color:#fff;background-color:var(--page-bg);border:1px solid hsla(240,9%,43%,.5);border:1px solid var(--border-color-light);border-radius:.6rem;cursor:pointer;display:flex;gap:12px;max-width:400px;padding:15px}.toast-root[data-state=open]{-webkit-animation:slideIn .15s cubic-bezier(.16,1,.3,1);animation:slideIn .15s cubic-bezier(.16,1,.3,1)}.toast-root[data-state=close]{-webkit-animation:opacityReveal .1s ease-in forwards;animation:opacityReveal .1s ease-in forwards}.toast-root[data-state=cancel]{-webkit-animation:transform .1s ease-out;animation:transform .1s ease-out;-webkit-transform:translateX(0);transform:translateX(0)}.toast-root.error{border:1px solid #ef4444;border:1px solid var(--error-color)}.toast-root.success{border:1px solid #10b981;border:1px solid var(--success-color)}.error-icon{color:#ef4444;color:var(--error-color);height:24px;width:24px}.success-icon{color:#10b981;color:var(--success-color);height:24px;width:24px}.loading-icon{-webkit-animation-duration:1.5s;animation-duration:1.5s;-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite;-webkit-animation-name:spin;animation-name:spin;-webkit-animation-timing-function:linear;animation-timing-function:linear;-webkit-transform-origin:center center;transform-origin:center center}.loading-icon,.toast-desc,.toast-icon{align-items:center;display:flex}.toast-desc{color:#040404;color:var(--text-color);margin:0;min-width:240px}.tooltip-trigger{align-items:center;display:flex;justify-content:center}.tooltip-content{background-color:#fff;background-color:var(--tooltip-bg);border-radius:4px;box-shadow:0 10px 38px -10px rgba(14,18,22,.35),0 10px 20px -15px rgba(14,18,22,.2);color:#000;color:var(--tooltip-text-color);padding:10px 15px}@media(prefers-reduced-motion:no-preference){.tooltip-content{-webkit-animation-duration:.4s;animation-duration:.4s;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-timing-function:cubic-bezier(.16,1,.3,1);animation-timing-function:cubic-bezier(.16,1,.3,1);will-change:transform,opacity}.tooltip-content[data-state=delayed-open][data-side=top]{-webkit-animation-name:slideDownAndFade;animation-name:slideDownAndFade}.tooltip-content[data-state=delayed-open][data-side=bottom]{-webkit-animation-name:slideUpAndFade;animation-name:slideUpAndFade}}.tooltip-arrow{fill:#fff;fill:var(--tooltip-bg)}*,:after,:before{box-sizing:border-box;margin:0;padding:0}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:100;src:url(/static/media/Inter-Thin.fff2a096db014f6239d4.woff2) format("woff2"),url(/static/media/Inter-Thin.29b9c616a95a912abf73.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:100;src:url(/static/media/Inter-ThinItalic.bf213704dce6b437ede4.woff2) format("woff2"),url(/static/media/Inter-ThinItalic.bae95eb2f889c797e435.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:200;src:url(/static/media/Inter-ExtraLight.72505e6a122c6acd5471.woff2) format("woff2"),url(/static/media/Inter-ExtraLight.c4248615291a9e8f1fb7.woff) 
format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:200;src:url(/static/media/Inter-ExtraLightItalic.5c7d7d6deb1d2ec8d48c.woff2) format("woff2"),url(/static/media/Inter-ExtraLightItalic.170dddfca278d3c2ad4a.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:300;src:url(/static/media/Inter-Light.2d5198822ab091ce4305.woff2) format("woff2"),url(/static/media/Inter-Light.994e34451cc19ede31d3.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:300;src:url(/static/media/Inter-LightItalic.f86952265d7b0f02c921.woff2) format("woff2"),url(/static/media/Inter-LightItalic.ef9f65d91d2b0ba9b2e4.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:400;src:url(/static/media/Inter-Regular.c8ba52b05a9ef10f4758.woff2) format("woff2"),url(/static/media/Inter-Regular.8c206db99195777c6769.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:400;src:url(/static/media/Inter-Italic.cb10ffd7684cd9836a05.woff2) format("woff2"),url(/static/media/Inter-Italic.890025e726861dba417f.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:500;src:url(/static/media/Inter-Medium.293fd13dbca5a3e450ef.woff2) format("woff2"),url(/static/media/Inter-Medium.9053572c46aeb4b16caa.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:500;src:url(/static/media/Inter-MediumItalic.085cb93e613ba3d40d2b.woff2) format("woff2"),url(/static/media/Inter-MediumItalic.3d0107dd43d0101274d3.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:600;src:url(/static/media/Inter-SemiBold.b5f0f109bc88052d4000.woff2) format("woff2"),url(/static/media/Inter-SemiBold.cca62d21c8c555c392e5.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:600;src:url(/static/media/Inter-SemiBoldItalic.d9467ee321a8f38aefff.woff2) format("woff2"),url(/static/media/Inter-SemiBoldItalic.463bdbfb28abad0fa6df.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:700;src:url(/static/media/Inter-Bold.ec64ea577b0349e055ad.woff2) format("woff2"),url(/static/media/Inter-Bold.93c1301bd9f486c573b3.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:700;src:url(/static/media/Inter-BoldItalic.2d26c56a606662486796.woff2) format("woff2"),url(/static/media/Inter-BoldItalic.b376885042f6c961a541.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:800;src:url(/static/media/Inter-ExtraBold.cbe0ae49c52c920fd563.woff2) format("woff2"),url(/static/media/Inter-ExtraBold.d0fa3bb2b7c9063dc594.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:800;src:url(/static/media/Inter-ExtraBoldItalic.535a6cf662596b3bd6a6.woff2) format("woff2"),url(/static/media/Inter-ExtraBoldItalic.6ab17abedc4d3f140953.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:normal;font-weight:900;src:url(/static/media/Inter-Black.15ca31c0a2a68f76d2d1.woff2) format("woff2"),url(/static/media/Inter-Black.c6938660eec019fefd68.woff) format("woff")}@font-face{font-display:swap;font-family:Inter;font-style:italic;font-weight:900;src:url(/static/media/Inter-BlackItalic.cb2a7335650c690077fe.woff2) 
format("woff2"),url(/static/media/Inter-BlackItalic.ca1e738e4f349f27514d.woff) format("woff")}@font-face{font-named-instance:"Regular";font-display:swap;font-family:Inter var;font-style:normal;font-weight:100 900;src:url(/static/media/Inter-roman.var.ba4caefcdf5b36b438db.woff2) format("woff2 supports variations(gvar)"),url(/static/media/Inter-roman.var.ba4caefcdf5b36b438db.woff2) format("woff2-variations"),url(/static/media/Inter-roman.var.ba4caefcdf5b36b438db.woff2) format("woff2")}@font-face{font-named-instance:"Italic";font-display:swap;font-family:Inter var;font-style:italic;font-weight:100 900;src:url(/static/media/Inter-italic.var.30807be7abc48ba8c73c.woff2) format("woff2 supports variations(gvar)"),url(/static/media/Inter-italic.var.30807be7abc48ba8c73c.woff2) format("woff2-variations"),url(/static/media/Inter-italic.var.30807be7abc48ba8c73c.woff2) format("woff2")}@font-face{font-display:swap;font-family:Inter var experimental;font-style:oblique 0deg 10deg;font-weight:100 900;src:url(/static/media/Inter.var.c2fe3cb2b7c746f7966a.woff2) format("woff2-variations"),url(/static/media/Inter.var.c2fe3cb2b7c746f7966a.woff2) format("woff2")}html{font-family:Inter,"system-ui"}@supports(font-variation-settings:normal){html{font-family:Inter var,"system-ui"}} \ No newline at end of file diff --git a/lama_cleaner/app/build/static/js/main.1fda6320.js b/lama_cleaner/app/build/static/js/main.1fda6320.js deleted file mode 100644 index c1af85b..0000000 --- a/lama_cleaner/app/build/static/js/main.1fda6320.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! For license information please see main.1fda6320.js.LICENSE.txt */ -(function(){var __webpack_modules__={157:function(module,__unused_webpack_exports,__webpack_require__){module=__webpack_require__.nmd(module),function _f(self){"use strict";try{module&&(self=module)}catch(e){}var t;function u(e){return"undefined"===typeof e||e}function aa(e){for(var t=Array(e),n=0;n=r))));v++);if(d)return i?ta(l,r,0):void(t[t.length]=l)}return!n&&l}function ta(e,t,n){return e=1===e.length?e[0]:[].concat.apply([],e),n||e.length>t?e.slice(n,n+t):e}function ua(e,t,n,r){return n?e=(e=e[(r=r&&t>n)?t:n])&&e[r?n:t]:e=e[t],e}function N(e,t,n,r,o){var i=0;if(e.constructor===Array)if(o)-1!==(t=e.indexOf(t))?1=this.B&&(a||!i[c])){var f=L(u,r,l),d="";switch(this.G){case"full":if(3f;p--)if(p-f>=this.B){var h=L(u,r,l,s,f);M(this,i,d=c.substring(f,p),h,e,n)}break}case"reverse":if(2=this.B&&M(this,i,d,L(u,r,l,s,p),e,n);d=""}case"forward":if(1=this.B&&M(this,i,d,f,e,n);break}default:if(this.C&&(f=Math.min(f/this.C(t,c,l)|0,u-1)),M(this,i,c,f,e,n),a&&1=this.B&&!s[c]){s[c]=1;var m=this.l&&c>f;M(this,o,m?f:c,L(d+(r/2>d?0:1),r,l,p-1,h-1),e,n,m?c:f)}}}}this.m||(this.register[e]=1)}}return this},t.search=function(e,t,n){n||(!t&&C(e)?e=(n=e).query:C(t)&&(n=t));var r,o,i,a,u,l=[],c=0;if(n){t=n.limit,c=n.offset||0;var s=n.context;o=n.suggest}if(e&&1<(r=(e=this.encode(e)).length)){n=v();for(var f,d=[],p=0,h=0;p=this.B&&!n[f]){if(!(this.s||o||this.map[f]))return l;d[h++]=f,n[f]=1}r=(e=d).length}if(!r)return l;for(t||(t=100),n=0,(s=this.depth&&1t||n)&&(o=o.slice(n,n+t)),r&&(o=za.call(this,o)),{tag:e,result:o}}function za(e){for(var t,n=Array(e.length),r=0;r"']/g,Z=RegExp(Y.source),$=RegExp(X.source),Q=/<%-([\s\S]+?)%>/g,J=/<%([\s\S]+?)%>/g,ee=/<%=([\s\S]+?)%>/g,te=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,ne=/^\w*$/,re=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,oe=/[\\^$.*+?()[\]{}|]/g,ie=RegExp(oe.source),ae=/^\s+/,ue=/\s/,le=/\{(?:\n\/\* 
\[wrapped with .+\] \*\/)?\n?/,ce=/\{\n\/\* \[wrapped with (.+)\] \*/,se=/,? & /,fe=/[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g,de=/[()=,{}\[\]\/\s]/,pe=/\\(\\)?/g,ve=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g,he=/\w*$/,me=/^[-+]0x[0-9a-f]+$/i,ge=/^0b[01]+$/i,ye=/^\[object .+?Constructor\]$/,be=/^0o[0-7]+$/i,we=/^(?:0|[1-9]\d*)$/,Ee=/[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g,xe=/($^)/,Ce=/['\n\r\u2028\u2029\\]/g,_e="\\ud800-\\udfff",Se="\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff",ke="\\u2700-\\u27bf",De="a-z\\xdf-\\xf6\\xf8-\\xff",Ae="A-Z\\xc0-\\xd6\\xd8-\\xde",Te="\\ufe0e\\ufe0f",Re="\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000",Fe="['\u2019]",Pe="["+_e+"]",Oe="["+Re+"]",Ne="["+Se+"]",Le="\\d+",Me="["+ke+"]",je="["+De+"]",Ie="[^"+_e+Re+Le+ke+De+Ae+"]",Be="\\ud83c[\\udffb-\\udfff]",ze="[^"+_e+"]",Ve="(?:\\ud83c[\\udde6-\\uddff]){2}",Ue="[\\ud800-\\udbff][\\udc00-\\udfff]",We="["+Ae+"]",He="\\u200d",Ke="(?:"+je+"|"+Ie+")",qe="(?:"+We+"|"+Ie+")",Ge="(?:['\u2019](?:d|ll|m|re|s|t|ve))?",Ye="(?:['\u2019](?:D|LL|M|RE|S|T|VE))?",Xe="(?:"+Ne+"|"+Be+")"+"?",Ze="["+Te+"]?",$e=Ze+Xe+("(?:"+He+"(?:"+[ze,Ve,Ue].join("|")+")"+Ze+Xe+")*"),Qe="(?:"+[Me,Ve,Ue].join("|")+")"+$e,Je="(?:"+[ze+Ne+"?",Ne,Ve,Ue,Pe].join("|")+")",et=RegExp(Fe,"g"),tt=RegExp(Ne,"g"),nt=RegExp(Be+"(?="+Be+")|"+Je+$e,"g"),rt=RegExp([We+"?"+je+"+"+Ge+"(?="+[Oe,We,"$"].join("|")+")",qe+"+"+Ye+"(?="+[Oe,We+Ke,"$"].join("|")+")",We+"?"+Ke+"+"+Ge,We+"+"+Ye,"\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])","\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])",Le,Qe].join("|"),"g"),ot=RegExp("["+He+_e+Se+Te+"]"),it=/[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/,at=["Array","Buffer","DataView","Date","Error","Float32Array","Float64Array","Function","Int8Array","Int16Array","Int32Array","Map","Math","Object","Promise","RegExp","Set","String","Symbol","TypeError","Uint8Array","Uint8ClampedArray","Uint16Array","Uint32Array","WeakMap","_","clearTimeout","isFinite","parseInt","setTimeout"],ut=-1,lt={};lt[M]=lt[j]=lt[I]=lt[B]=lt[z]=lt[V]=lt[U]=lt[W]=lt[H]=!0,lt[y]=lt[b]=lt[N]=lt[w]=lt[L]=lt[E]=lt[x]=lt[C]=lt[S]=lt[k]=lt[D]=lt[T]=lt[R]=lt[F]=lt[O]=!1;var ct={};ct[y]=ct[b]=ct[N]=ct[L]=ct[w]=ct[E]=ct[M]=ct[j]=ct[I]=ct[B]=ct[z]=ct[S]=ct[k]=ct[D]=ct[T]=ct[R]=ct[F]=ct[P]=ct[V]=ct[U]=ct[W]=ct[H]=!0,ct[x]=ct[C]=ct[O]=!1;var st={"\\":"\\","'":"'","\n":"n","\r":"r","\u2028":"u2028","\u2029":"u2029"},ft=parseFloat,dt=parseInt,pt="object"==typeof n.g&&n.g&&n.g.Object===Object&&n.g,vt="object"==typeof self&&self&&self.Object===Object&&self,ht=pt||vt||Function("return this")(),mt=t&&!t.nodeType&&t,gt=mt&&e&&!e.nodeType&&e,yt=gt&>.exports===mt,bt=yt&&pt.process,wt=function(){try{var e=gt&>.require&>.require("util").types;return e||bt&&bt.binding&&bt.binding("util")}catch(t){}}(),Et=wt&&wt.isArrayBuffer,xt=wt&&wt.isDate,Ct=wt&&wt.isMap,_t=wt&&wt.isRegExp,St=wt&&wt.isSet,kt=wt&&wt.isTypedArray;function Dt(e,t,n){switch(n.length){case 0:return e.call(t);case 1:return e.call(t,n[0]);case 2:return e.call(t,n[0],n[1]);case 3:return e.call(t,n[0],n[1],n[2])}return e.apply(t,n)}function At(e,t,n,r){for(var o=-1,i=null==e?0:e.length;++o-1}function Nt(e,t,n){for(var r=-1,o=null==e?0:e.length;++r-1;);return n}function rn(e,t){for(var n=e.length;n--&&Wt(t,e[n],0)>-1;);return n}function on(e,t){for(var n=e.length,r=0;n--;)e[n]===t&&++r;return r}var 
an=Yt({"\xc0":"A","\xc1":"A","\xc2":"A","\xc3":"A","\xc4":"A","\xc5":"A","\xe0":"a","\xe1":"a","\xe2":"a","\xe3":"a","\xe4":"a","\xe5":"a","\xc7":"C","\xe7":"c","\xd0":"D","\xf0":"d","\xc8":"E","\xc9":"E","\xca":"E","\xcb":"E","\xe8":"e","\xe9":"e","\xea":"e","\xeb":"e","\xcc":"I","\xcd":"I","\xce":"I","\xcf":"I","\xec":"i","\xed":"i","\xee":"i","\xef":"i","\xd1":"N","\xf1":"n","\xd2":"O","\xd3":"O","\xd4":"O","\xd5":"O","\xd6":"O","\xd8":"O","\xf2":"o","\xf3":"o","\xf4":"o","\xf5":"o","\xf6":"o","\xf8":"o","\xd9":"U","\xda":"U","\xdb":"U","\xdc":"U","\xf9":"u","\xfa":"u","\xfb":"u","\xfc":"u","\xdd":"Y","\xfd":"y","\xff":"y","\xc6":"Ae","\xe6":"ae","\xde":"Th","\xfe":"th","\xdf":"ss","\u0100":"A","\u0102":"A","\u0104":"A","\u0101":"a","\u0103":"a","\u0105":"a","\u0106":"C","\u0108":"C","\u010a":"C","\u010c":"C","\u0107":"c","\u0109":"c","\u010b":"c","\u010d":"c","\u010e":"D","\u0110":"D","\u010f":"d","\u0111":"d","\u0112":"E","\u0114":"E","\u0116":"E","\u0118":"E","\u011a":"E","\u0113":"e","\u0115":"e","\u0117":"e","\u0119":"e","\u011b":"e","\u011c":"G","\u011e":"G","\u0120":"G","\u0122":"G","\u011d":"g","\u011f":"g","\u0121":"g","\u0123":"g","\u0124":"H","\u0126":"H","\u0125":"h","\u0127":"h","\u0128":"I","\u012a":"I","\u012c":"I","\u012e":"I","\u0130":"I","\u0129":"i","\u012b":"i","\u012d":"i","\u012f":"i","\u0131":"i","\u0134":"J","\u0135":"j","\u0136":"K","\u0137":"k","\u0138":"k","\u0139":"L","\u013b":"L","\u013d":"L","\u013f":"L","\u0141":"L","\u013a":"l","\u013c":"l","\u013e":"l","\u0140":"l","\u0142":"l","\u0143":"N","\u0145":"N","\u0147":"N","\u014a":"N","\u0144":"n","\u0146":"n","\u0148":"n","\u014b":"n","\u014c":"O","\u014e":"O","\u0150":"O","\u014d":"o","\u014f":"o","\u0151":"o","\u0154":"R","\u0156":"R","\u0158":"R","\u0155":"r","\u0157":"r","\u0159":"r","\u015a":"S","\u015c":"S","\u015e":"S","\u0160":"S","\u015b":"s","\u015d":"s","\u015f":"s","\u0161":"s","\u0162":"T","\u0164":"T","\u0166":"T","\u0163":"t","\u0165":"t","\u0167":"t","\u0168":"U","\u016a":"U","\u016c":"U","\u016e":"U","\u0170":"U","\u0172":"U","\u0169":"u","\u016b":"u","\u016d":"u","\u016f":"u","\u0171":"u","\u0173":"u","\u0174":"W","\u0175":"w","\u0176":"Y","\u0177":"y","\u0178":"Y","\u0179":"Z","\u017b":"Z","\u017d":"Z","\u017a":"z","\u017c":"z","\u017e":"z","\u0132":"IJ","\u0133":"ij","\u0152":"Oe","\u0153":"oe","\u0149":"'n","\u017f":"s"}),un=Yt({"&":"&","<":"<",">":">",'"':""","'":"'"});function ln(e){return"\\"+st[e]}function cn(e){return ot.test(e)}function sn(e){var t=-1,n=Array(e.size);return e.forEach((function(e,r){n[++t]=[r,e]})),n}function fn(e,t){return function(n){return e(t(n))}}function dn(e,t){for(var n=-1,r=e.length,o=0,i=[];++n",""":'"',"'":"'"});var bn=function e(t){var n=(t=null==t?ht:bn.defaults(ht.Object(),t,bn.pick(ht,at))).Array,r=t.Date,ue=t.Error,_e=t.Function,Se=t.Math,ke=t.Object,De=t.RegExp,Ae=t.String,Te=t.TypeError,Re=n.prototype,Fe=_e.prototype,Pe=ke.prototype,Oe=t["__core-js_shared__"],Ne=Fe.toString,Le=Pe.hasOwnProperty,Me=0,je=function(){var e=/[^.]+$/.exec(Oe&&Oe.keys&&Oe.keys.IE_PROTO||"");return e?"Symbol(src)_1."+e:""}(),Ie=Pe.toString,Be=Ne.call(ke),ze=ht._,Ve=De("^"+Ne.call(Le).replace(oe,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$"),Ue=yt?t.Buffer:o,We=t.Symbol,He=t.Uint8Array,Ke=Ue?Ue.allocUnsafe:o,qe=fn(ke.getPrototypeOf,ke),Ge=ke.create,Ye=Pe.propertyIsEnumerable,Xe=Re.splice,Ze=We?We.isConcatSpreadable:o,$e=We?We.iterator:o,Qe=We?We.toStringTag:o,Je=function(){try{var e=pi(ke,"defineProperty");return 
e({},"",{}),e}catch(t){}}(),nt=t.clearTimeout!==ht.clearTimeout&&t.clearTimeout,ot=r&&r.now!==ht.Date.now&&r.now,st=t.setTimeout!==ht.setTimeout&&t.setTimeout,pt=Se.ceil,vt=Se.floor,mt=ke.getOwnPropertySymbols,gt=Ue?Ue.isBuffer:o,bt=t.isFinite,wt=Re.join,zt=fn(ke.keys,ke),Yt=Se.max,wn=Se.min,En=r.now,xn=t.parseInt,Cn=Se.random,_n=Re.reverse,Sn=pi(t,"DataView"),kn=pi(t,"Map"),Dn=pi(t,"Promise"),An=pi(t,"Set"),Tn=pi(t,"WeakMap"),Rn=pi(ke,"create"),Fn=Tn&&new Tn,Pn={},On=zi(Sn),Nn=zi(kn),Ln=zi(Dn),Mn=zi(An),jn=zi(Tn),In=We?We.prototype:o,Bn=In?In.valueOf:o,zn=In?In.toString:o;function Vn(e){if(ru(e)&&!qa(e)&&!(e instanceof Kn)){if(e instanceof Hn)return e;if(Le.call(e,"__wrapped__"))return Vi(e)}return new Hn(e)}var Un=function(){function e(){}return function(t){if(!nu(t))return{};if(Ge)return Ge(t);e.prototype=t;var n=new e;return e.prototype=o,n}}();function Wn(){}function Hn(e,t){this.__wrapped__=e,this.__actions__=[],this.__chain__=!!t,this.__index__=0,this.__values__=o}function Kn(e){this.__wrapped__=e,this.__actions__=[],this.__dir__=1,this.__filtered__=!1,this.__iteratees__=[],this.__takeCount__=m,this.__views__=[]}function qn(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t=t?e:t)),e}function cr(e,t,n,r,i,a){var u,l=1&t,c=2&t,s=4&t;if(n&&(u=i?n(e,r,i,a):n(e)),u!==o)return u;if(!nu(e))return e;var f=qa(e);if(f){if(u=function(e){var t=e.length,n=new e.constructor(t);t&&"string"==typeof e[0]&&Le.call(e,"index")&&(n.index=e.index,n.input=e.input);return n}(e),!l)return Fo(e,u)}else{var d=mi(e),p=d==C||d==_;if(Za(e))return So(e,l);if(d==D||d==y||p&&!i){if(u=c||p?{}:yi(e),!l)return c?function(e,t){return Po(e,hi(e),t)}(e,function(e,t){return e&&Po(t,Nu(t),e)}(u,e)):function(e,t){return Po(e,vi(e),t)}(e,ir(u,e))}else{if(!ct[d])return i?e:{};u=function(e,t,n){var r=e.constructor;switch(t){case N:return ko(e);case w:case E:return new r(+e);case L:return function(e,t){var n=t?ko(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.byteLength)}(e,n);case M:case j:case I:case B:case z:case V:case U:case W:case H:return Do(e,n);case S:return new r;case k:case F:return new r(e);case T:return function(e){var t=new e.constructor(e.source,he.exec(e));return t.lastIndex=e.lastIndex,t}(e);case R:return new r;case P:return o=e,Bn?ke(Bn.call(o)):{}}var o}(e,d,l)}}a||(a=new Zn);var v=a.get(e);if(v)return v;a.set(e,u),lu(e)?e.forEach((function(r){u.add(cr(r,t,n,r,e,a))})):ou(e)&&e.forEach((function(r,o){u.set(o,cr(r,t,n,o,e,a))}));var h=f?o:(s?c?ai:ii:c?Nu:Ou)(e);return Tt(h||e,(function(r,o){h&&(r=e[o=r]),nr(u,o,cr(r,t,n,o,e,a))})),u}function sr(e,t,n){var r=n.length;if(null==e)return!r;for(e=ke(e);r--;){var i=n[r],a=t[i],u=e[i];if(u===o&&!(i in e)||!a(u))return!1}return!0}function fr(e,t,n){if("function"!=typeof e)throw new Te(i);return Oi((function(){e.apply(o,n)}),t)}function dr(e,t,n,r){var o=-1,i=Ot,a=!0,u=e.length,l=[],c=t.length;if(!u)return l;n&&(t=Lt(t,Jt(n))),r?(i=Nt,a=!1):t.length>=200&&(i=tn,a=!1,t=new Xn(t));e:for(;++o-1},Gn.prototype.set=function(e,t){var n=this.__data__,r=rr(n,e);return r<0?(++this.size,n.push([e,t])):n[r][1]=t,this},Yn.prototype.clear=function(){this.size=0,this.__data__={hash:new qn,map:new(kn||Gn),string:new qn}},Yn.prototype.delete=function(e){var t=fi(this,e).delete(e);return this.size-=t?1:0,t},Yn.prototype.get=function(e){return fi(this,e).get(e)},Yn.prototype.has=function(e){return fi(this,e).has(e)},Yn.prototype.set=function(e,t){var n=fi(this,e),r=n.size;return 
n.set(e,t),this.size+=n.size==r?0:1,this},Xn.prototype.add=Xn.prototype.push=function(e){return this.__data__.set(e,a),this},Xn.prototype.has=function(e){return this.__data__.has(e)},Zn.prototype.clear=function(){this.__data__=new Gn,this.size=0},Zn.prototype.delete=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n},Zn.prototype.get=function(e){return this.__data__.get(e)},Zn.prototype.has=function(e){return this.__data__.has(e)},Zn.prototype.set=function(e,t){var n=this.__data__;if(n instanceof Gn){var r=n.__data__;if(!kn||r.length<199)return r.push([e,t]),this.size=++n.size,this;n=this.__data__=new Yn(r)}return n.set(e,t),this.size=n.size,this};var pr=Lo(Er),vr=Lo(xr,!0);function hr(e,t){var n=!0;return pr(e,(function(e,r,o){return n=!!t(e,r,o)})),n}function mr(e,t,n){for(var r=-1,i=e.length;++r0&&n(u)?t>1?yr(u,t-1,n,r,o):Mt(o,u):r||(o[o.length]=u)}return o}var br=Mo(),wr=Mo(!0);function Er(e,t){return e&&br(e,t,Ou)}function xr(e,t){return e&&wr(e,t,Ou)}function Cr(e,t){return Pt(t,(function(t){return Ja(e[t])}))}function _r(e,t){for(var n=0,r=(t=Eo(t,e)).length;null!=e&&nt}function Ar(e,t){return null!=e&&Le.call(e,t)}function Tr(e,t){return null!=e&&t in ke(e)}function Rr(e,t,r){for(var i=r?Nt:Ot,a=e[0].length,u=e.length,l=u,c=n(u),s=1/0,f=[];l--;){var d=e[l];l&&t&&(d=Lt(d,Jt(t))),s=wn(d.length,s),c[l]=!r&&(t||a>=120&&d.length>=120)?new Xn(l&&d):o}d=e[0];var p=-1,v=c[0];e:for(;++p=u?l:l*("desc"==n[r]?-1:1)}return e.index-t.index}(e,t,n)}))}function qr(e,t,n){for(var r=-1,o=t.length,i={};++r-1;)u!==e&&Xe.call(u,l,1),Xe.call(e,l,1);return e}function Yr(e,t){for(var n=e?t.length:0,r=n-1;n--;){var o=t[n];if(n==r||o!==i){var i=o;wi(o)?Xe.call(e,o,1):po(e,o)}}return e}function Xr(e,t){return e+vt(Cn()*(t-e+1))}function Zr(e,t){var n="";if(!e||t<1||t>v)return n;do{t%2&&(n+=e),(t=vt(t/2))&&(e+=e)}while(t);return n}function $r(e,t){return Ni(Ai(e,t,il),e+"")}function Qr(e){return Qn(Uu(e))}function Jr(e,t){var n=Uu(e);return ji(n,lr(t,0,n.length))}function eo(e,t,n,r){if(!nu(e))return e;for(var i=-1,a=(t=Eo(t,e)).length,u=a-1,l=e;null!=l&&++ii?0:i+t),(r=r>i?i:r)<0&&(r+=i),i=t>r?0:r-t>>>0,t>>>=0;for(var a=n(i);++o>>1,a=e[i];null!==a&&!su(a)&&(n?a<=t:a=200){var c=t?null:$o(e);if(c)return pn(c);a=!1,o=tn,l=new Xn}else l=t?[]:u;e:for(;++r=r?e:oo(e,t,n)}var _o=nt||function(e){return ht.clearTimeout(e)};function So(e,t){if(t)return e.slice();var n=e.length,r=Ke?Ke(n):new e.constructor(n);return e.copy(r),r}function ko(e){var t=new e.constructor(e.byteLength);return new He(t).set(new He(e)),t}function Do(e,t){var n=t?ko(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.length)}function Ao(e,t){if(e!==t){var n=e!==o,r=null===e,i=e===e,a=su(e),u=t!==o,l=null===t,c=t===t,s=su(t);if(!l&&!s&&!a&&e>t||a&&u&&c&&!l&&!s||r&&u&&c||!n&&c||!i)return 1;if(!r&&!a&&!s&&e1?n[i-1]:o,u=i>2?n[2]:o;for(a=e.length>3&&"function"==typeof a?(i--,a):o,u&&Ei(n[0],n[1],u)&&(a=i<3?o:a,i=1),t=ke(t);++r-1?i[a?t[u]:u]:o}}function Vo(e){return oi((function(t){var n=t.length,r=n,a=Hn.prototype.thru;for(e&&t.reverse();r--;){var u=t[r];if("function"!=typeof u)throw new Te(i);if(a&&!l&&"wrapper"==li(u))var l=new Hn([],!0)}for(r=l?r:n;++r1&&b.reverse(),p&&sl))return!1;var s=a.get(e),f=a.get(t);if(s&&f)return s==t&&f==e;var d=-1,p=!0,v=2&n?new Xn:o;for(a.set(e,t),a.set(t,e);++d-1&&e%1==0&&e1?"& ":"")+t[r],t=t.join(n>2?", ":" "),e.replace(le,"{\n/* [wrapped with "+t+"] */\n")}(r,function(e,t){return Tt(g,(function(n){var r="_."+n[0];t&n[1]&&!Ot(e,r)&&e.push(r)})),e.sort()}(function(e){var 
t=e.match(ce);return t?t[1].split(se):[]}(r),n)))}function Mi(e){var t=0,n=0;return function(){var r=En(),i=16-(r-n);if(n=r,i>0){if(++t>=800)return arguments[0]}else t=0;return e.apply(o,arguments)}}function ji(e,t){var n=-1,r=e.length,i=r-1;for(t=t===o?r:t;++n1?e[t-1]:o;return n="function"==typeof n?(e.pop(),n):o,ua(e,n)}));function va(e){var t=Vn(e);return t.__chain__=!0,t}function ha(e,t){return t(e)}var ma=oi((function(e){var t=e.length,n=t?e[0]:0,r=this.__wrapped__,i=function(t){return ur(t,e)};return!(t>1||this.__actions__.length)&&r instanceof Kn&&wi(n)?((r=r.slice(n,+n+(t?1:0))).__actions__.push({func:ha,args:[i],thisArg:o}),new Hn(r,this.__chain__).thru((function(e){return t&&!e.length&&e.push(o),e}))):this.thru(i)}));var ga=Oo((function(e,t,n){Le.call(e,n)?++e[n]:ar(e,n,1)}));var ya=zo(Ki),ba=zo(qi);function wa(e,t){return(qa(e)?Tt:pr)(e,si(t,3))}function Ea(e,t){return(qa(e)?Rt:vr)(e,si(t,3))}var xa=Oo((function(e,t,n){Le.call(e,n)?e[n].push(t):ar(e,n,[t])}));var Ca=$r((function(e,t,r){var o=-1,i="function"==typeof t,a=Ya(e)?n(e.length):[];return pr(e,(function(e){a[++o]=i?Dt(t,e,r):Fr(e,t,r)})),a})),_a=Oo((function(e,t,n){ar(e,n,t)}));function Sa(e,t){return(qa(e)?Lt:zr)(e,si(t,3))}var ka=Oo((function(e,t,n){e[n?0:1].push(t)}),(function(){return[[],[]]}));var Da=$r((function(e,t){if(null==e)return[];var n=t.length;return n>1&&Ei(e,t[0],t[1])?t=[]:n>2&&Ei(t[0],t[1],t[2])&&(t=[t[0]]),Kr(e,yr(t,1),[])})),Aa=ot||function(){return ht.Date.now()};function Ta(e,t,n){return t=n?o:t,t=e&&null==t?e.length:t,Jo(e,f,o,o,o,o,t)}function Ra(e,t){var n;if("function"!=typeof t)throw new Te(i);return e=mu(e),function(){return--e>0&&(n=t.apply(this,arguments)),e<=1&&(t=o),n}}var Fa=$r((function(e,t,n){var r=1;if(n.length){var o=dn(n,ci(Fa));r|=c}return Jo(e,r,t,n,o)})),Pa=$r((function(e,t,n){var r=3;if(n.length){var o=dn(n,ci(Pa));r|=c}return Jo(t,r,e,n,o)}));function Oa(e,t,n){var r,a,u,l,c,s,f=0,d=!1,p=!1,v=!0;if("function"!=typeof e)throw new Te(i);function h(t){var n=r,i=a;return r=a=o,f=t,l=e.apply(i,n)}function m(e){return f=e,c=Oi(y,t),d?h(e):l}function g(e){var n=e-s;return s===o||n>=t||n<0||p&&e-f>=u}function y(){var e=Aa();if(g(e))return b(e);c=Oi(y,function(e){var n=t-(e-s);return p?wn(n,u-(e-f)):n}(e))}function b(e){return c=o,v&&r?h(e):(r=a=o,l)}function w(){var e=Aa(),n=g(e);if(r=arguments,a=this,s=e,n){if(c===o)return m(s);if(p)return _o(c),c=Oi(y,t),h(s)}return c===o&&(c=Oi(y,t)),l}return t=yu(t)||0,nu(n)&&(d=!!n.leading,u=(p="maxWait"in n)?Yt(yu(n.maxWait)||0,t):u,v="trailing"in n?!!n.trailing:v),w.cancel=function(){c!==o&&_o(c),f=0,r=s=a=c=o},w.flush=function(){return c===o?l:b(Aa())},w}var Na=$r((function(e,t){return fr(e,1,t)})),La=$r((function(e,t,n){return fr(e,yu(t)||0,n)}));function Ma(e,t){if("function"!=typeof e||null!=t&&"function"!=typeof t)throw new Te(i);var n=function n(){var r=arguments,o=t?t.apply(this,r):r[0],i=n.cache;if(i.has(o))return i.get(o);var a=e.apply(this,r);return n.cache=i.set(o,a)||i,a};return n.cache=new(Ma.Cache||Yn),n}function ja(e){if("function"!=typeof e)throw new Te(i);return function(){var t=arguments;switch(t.length){case 0:return!e.call(this);case 1:return!e.call(this,t[0]);case 2:return!e.call(this,t[0],t[1]);case 3:return!e.call(this,t[0],t[1],t[2])}return!e.apply(this,t)}}Ma.Cache=Yn;var Ia=xo((function(e,t){var n=(t=1==t.length&&qa(t[0])?Lt(t[0],Jt(si())):Lt(yr(t,1),Jt(si()))).length;return $r((function(r){for(var o=-1,i=wn(r.length,n);++o=t})),Ka=Pr(function(){return arguments}())?Pr:function(e){return 
ru(e)&&Le.call(e,"callee")&&!Ye.call(e,"callee")},qa=n.isArray,Ga=Et?Jt(Et):function(e){return ru(e)&&kr(e)==N};function Ya(e){return null!=e&&tu(e.length)&&!Ja(e)}function Xa(e){return ru(e)&&Ya(e)}var Za=gt||yl,$a=xt?Jt(xt):function(e){return ru(e)&&kr(e)==E};function Qa(e){if(!ru(e))return!1;var t=kr(e);return t==x||"[object DOMException]"==t||"string"==typeof e.message&&"string"==typeof e.name&&!au(e)}function Ja(e){if(!nu(e))return!1;var t=kr(e);return t==C||t==_||"[object AsyncFunction]"==t||"[object Proxy]"==t}function eu(e){return"number"==typeof e&&e==mu(e)}function tu(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=v}function nu(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}function ru(e){return null!=e&&"object"==typeof e}var ou=Ct?Jt(Ct):function(e){return ru(e)&&mi(e)==S};function iu(e){return"number"==typeof e||ru(e)&&kr(e)==k}function au(e){if(!ru(e)||kr(e)!=D)return!1;var t=qe(e);if(null===t)return!0;var n=Le.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&Ne.call(n)==Be}var uu=_t?Jt(_t):function(e){return ru(e)&&kr(e)==T};var lu=St?Jt(St):function(e){return ru(e)&&mi(e)==R};function cu(e){return"string"==typeof e||!qa(e)&&ru(e)&&kr(e)==F}function su(e){return"symbol"==typeof e||ru(e)&&kr(e)==P}var fu=kt?Jt(kt):function(e){return ru(e)&&tu(e.length)&&!!lt[kr(e)]};var du=Yo(Br),pu=Yo((function(e,t){return e<=t}));function vu(e){if(!e)return[];if(Ya(e))return cu(e)?mn(e):Fo(e);if($e&&e[$e])return function(e){for(var t,n=[];!(t=e.next()).done;)n.push(t.value);return n}(e[$e]());var t=mi(e);return(t==S?sn:t==R?pn:Uu)(e)}function hu(e){return e?(e=yu(e))===p||e===-1/0?17976931348623157e292*(e<0?-1:1):e===e?e:0:0===e?e:0}function mu(e){var t=hu(e),n=t%1;return t===t?n?t-n:t:0}function gu(e){return e?lr(mu(e),0,m):0}function yu(e){if("number"==typeof e)return e;if(su(e))return h;if(nu(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=nu(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=Qt(e);var n=ge.test(e);return n||be.test(e)?dt(e.slice(2),n?2:8):me.test(e)?h:+e}function bu(e){return Po(e,Nu(e))}function wu(e){return null==e?"":so(e)}var Eu=No((function(e,t){if(Si(t)||Ya(t))Po(t,Ou(t),e);else for(var n in t)Le.call(t,n)&&nr(e,n,t[n])})),xu=No((function(e,t){Po(t,Nu(t),e)})),Cu=No((function(e,t,n,r){Po(t,Nu(t),e,r)})),_u=No((function(e,t,n,r){Po(t,Ou(t),e,r)})),Su=oi(ur);var ku=$r((function(e,t){e=ke(e);var n=-1,r=t.length,i=r>2?t[2]:o;for(i&&Ei(t[0],t[1],i)&&(r=1);++n1),t})),Po(e,ai(e),n),r&&(n=cr(n,7,ni));for(var o=t.length;o--;)po(n,t[o]);return n}));var Iu=oi((function(e,t){return null==e?{}:function(e,t){return qr(e,t,(function(t,n){return Tu(e,n)}))}(e,t)}));function Bu(e,t){if(null==e)return{};var n=Lt(ai(e),(function(e){return[e]}));return t=si(t),qr(e,n,(function(e,n){return t(e,n[0])}))}var zu=Qo(Ou),Vu=Qo(Nu);function Uu(e){return null==e?[]:en(e,Ou(e))}var Wu=Io((function(e,t,n){return t=t.toLowerCase(),e+(n?Hu(t):t)}));function Hu(e){return Qu(wu(e).toLowerCase())}function Ku(e){return(e=wu(e))&&e.replace(Ee,an).replace(tt,"")}var qu=Io((function(e,t,n){return e+(n?"-":"")+t.toLowerCase()})),Gu=Io((function(e,t,n){return e+(n?" ":"")+t.toLowerCase()})),Yu=jo("toLowerCase");var Xu=Io((function(e,t,n){return e+(n?"_":"")+t.toLowerCase()}));var Zu=Io((function(e,t,n){return e+(n?" ":"")+Qu(t)}));var $u=Io((function(e,t,n){return e+(n?" 
":"")+t.toUpperCase()})),Qu=jo("toUpperCase");function Ju(e,t,n){return e=wu(e),(t=n?o:t)===o?function(e){return it.test(e)}(e)?function(e){return e.match(rt)||[]}(e):function(e){return e.match(fe)||[]}(e):e.match(t)||[]}var el=$r((function(e,t){try{return Dt(e,o,t)}catch(n){return Qa(n)?n:new ue(n)}})),tl=oi((function(e,t){return Tt(t,(function(t){t=Bi(t),ar(e,t,Fa(e[t],e))})),e}));function nl(e){return function(){return e}}var rl=Vo(),ol=Vo(!0);function il(e){return e}function al(e){return Mr("function"==typeof e?e:cr(e,1))}var ul=$r((function(e,t){return function(n){return Fr(n,e,t)}})),ll=$r((function(e,t){return function(n){return Fr(e,n,t)}}));function cl(e,t,n){var r=Ou(t),o=Cr(t,r);null!=n||nu(t)&&(o.length||!r.length)||(n=t,t=e,e=this,o=Cr(t,Ou(t)));var i=!(nu(n)&&"chain"in n)||!!n.chain,a=Ja(e);return Tt(o,(function(n){var r=t[n];e[n]=r,a&&(e.prototype[n]=function(){var t=this.__chain__;if(i||t){var n=e(this.__wrapped__),o=n.__actions__=Fo(this.__actions__);return o.push({func:r,args:arguments,thisArg:e}),n.__chain__=t,n}return r.apply(e,Mt([this.value()],arguments))})})),e}function sl(){}var fl=Ko(Lt),dl=Ko(Ft),pl=Ko(Bt);function vl(e){return xi(e)?Gt(Bi(e)):function(e){return function(t){return _r(t,e)}}(e)}var hl=Go(),ml=Go(!0);function gl(){return[]}function yl(){return!1}var bl=Ho((function(e,t){return e+t}),0),wl=Zo("ceil"),El=Ho((function(e,t){return e/t}),1),xl=Zo("floor");var Cl=Ho((function(e,t){return e*t}),1),_l=Zo("round"),Sl=Ho((function(e,t){return e-t}),0);return Vn.after=function(e,t){if("function"!=typeof t)throw new Te(i);return e=mu(e),function(){if(--e<1)return t.apply(this,arguments)}},Vn.ary=Ta,Vn.assign=Eu,Vn.assignIn=xu,Vn.assignInWith=Cu,Vn.assignWith=_u,Vn.at=Su,Vn.before=Ra,Vn.bind=Fa,Vn.bindAll=tl,Vn.bindKey=Pa,Vn.castArray=function(){if(!arguments.length)return[];var e=arguments[0];return qa(e)?e:[e]},Vn.chain=va,Vn.chunk=function(e,t,r){t=(r?Ei(e,t,r):t===o)?1:Yt(mu(t),0);var i=null==e?0:e.length;if(!i||t<1)return[];for(var a=0,u=0,l=n(pt(i/t));ai?0:i+n),(r=r===o||r>i?i:mu(r))<0&&(r+=i),r=n>r?0:gu(r);n>>0)?(e=wu(e))&&("string"==typeof t||null!=t&&!uu(t))&&!(t=so(t))&&cn(e)?Co(mn(e),0,n):e.split(t,n):[]},Vn.spread=function(e,t){if("function"!=typeof e)throw new Te(i);return t=null==t?0:Yt(mu(t),0),$r((function(n){var r=n[t],o=Co(n,0,t);return r&&Mt(o,r),Dt(e,this,o)}))},Vn.tail=function(e){var t=null==e?0:e.length;return t?oo(e,1,t):[]},Vn.take=function(e,t,n){return e&&e.length?oo(e,0,(t=n||t===o?1:mu(t))<0?0:t):[]},Vn.takeRight=function(e,t,n){var r=null==e?0:e.length;return r?oo(e,(t=r-(t=n||t===o?1:mu(t)))<0?0:t,r):[]},Vn.takeRightWhile=function(e,t){return e&&e.length?ho(e,si(t,3),!1,!0):[]},Vn.takeWhile=function(e,t){return e&&e.length?ho(e,si(t,3)):[]},Vn.tap=function(e,t){return t(e),e},Vn.throttle=function(e,t,n){var r=!0,o=!0;if("function"!=typeof e)throw new Te(i);return nu(n)&&(r="leading"in n?!!n.leading:r,o="trailing"in n?!!n.trailing:o),Oa(e,t,{leading:r,maxWait:t,trailing:o})},Vn.thru=ha,Vn.toArray=vu,Vn.toPairs=zu,Vn.toPairsIn=Vu,Vn.toPath=function(e){return qa(e)?Lt(e,Bi):su(e)?[e]:Fo(Ii(wu(e)))},Vn.toPlainObject=bu,Vn.transform=function(e,t,n){var r=qa(e),o=r||Za(e)||fu(e);if(t=si(t,4),null==n){var i=e&&e.constructor;n=o?r?new i:[]:nu(e)&&Ja(i)?Un(qe(e)):{}}return(o?Tt:Er)(e,(function(e,r,o){return t(n,e,r,o)})),n},Vn.unary=function(e){return Ta(e,1)},Vn.union=ra,Vn.unionBy=oa,Vn.unionWith=ia,Vn.uniq=function(e){return e&&e.length?fo(e):[]},Vn.uniqBy=function(e,t){return 
e&&e.length?fo(e,si(t,2)):[]},Vn.uniqWith=function(e,t){return t="function"==typeof t?t:o,e&&e.length?fo(e,o,t):[]},Vn.unset=function(e,t){return null==e||po(e,t)},Vn.unzip=aa,Vn.unzipWith=ua,Vn.update=function(e,t,n){return null==e?e:vo(e,t,wo(n))},Vn.updateWith=function(e,t,n,r){return r="function"==typeof r?r:o,null==e?e:vo(e,t,wo(n),r)},Vn.values=Uu,Vn.valuesIn=function(e){return null==e?[]:en(e,Nu(e))},Vn.without=la,Vn.words=Ju,Vn.wrap=function(e,t){return Ba(wo(t),e)},Vn.xor=ca,Vn.xorBy=sa,Vn.xorWith=fa,Vn.zip=da,Vn.zipObject=function(e,t){return yo(e||[],t||[],nr)},Vn.zipObjectDeep=function(e,t){return yo(e||[],t||[],eo)},Vn.zipWith=pa,Vn.entries=zu,Vn.entriesIn=Vu,Vn.extend=xu,Vn.extendWith=Cu,cl(Vn,Vn),Vn.add=bl,Vn.attempt=el,Vn.camelCase=Wu,Vn.capitalize=Hu,Vn.ceil=wl,Vn.clamp=function(e,t,n){return n===o&&(n=t,t=o),n!==o&&(n=(n=yu(n))===n?n:0),t!==o&&(t=(t=yu(t))===t?t:0),lr(yu(e),t,n)},Vn.clone=function(e){return cr(e,4)},Vn.cloneDeep=function(e){return cr(e,5)},Vn.cloneDeepWith=function(e,t){return cr(e,5,t="function"==typeof t?t:o)},Vn.cloneWith=function(e,t){return cr(e,4,t="function"==typeof t?t:o)},Vn.conformsTo=function(e,t){return null==t||sr(e,t,Ou(t))},Vn.deburr=Ku,Vn.defaultTo=function(e,t){return null==e||e!==e?t:e},Vn.divide=El,Vn.endsWith=function(e,t,n){e=wu(e),t=so(t);var r=e.length,i=n=n===o?r:lr(mu(n),0,r);return(n-=t.length)>=0&&e.slice(n,i)==t},Vn.eq=Ua,Vn.escape=function(e){return(e=wu(e))&&$.test(e)?e.replace(X,un):e},Vn.escapeRegExp=function(e){return(e=wu(e))&&ie.test(e)?e.replace(oe,"\\$&"):e},Vn.every=function(e,t,n){var r=qa(e)?Ft:hr;return n&&Ei(e,t,n)&&(t=o),r(e,si(t,3))},Vn.find=ya,Vn.findIndex=Ki,Vn.findKey=function(e,t){return Vt(e,si(t,3),Er)},Vn.findLast=ba,Vn.findLastIndex=qi,Vn.findLastKey=function(e,t){return Vt(e,si(t,3),xr)},Vn.floor=xl,Vn.forEach=wa,Vn.forEachRight=Ea,Vn.forIn=function(e,t){return null==e?e:br(e,si(t,3),Nu)},Vn.forInRight=function(e,t){return null==e?e:wr(e,si(t,3),Nu)},Vn.forOwn=function(e,t){return e&&Er(e,si(t,3))},Vn.forOwnRight=function(e,t){return e&&xr(e,si(t,3))},Vn.get=Au,Vn.gt=Wa,Vn.gte=Ha,Vn.has=function(e,t){return null!=e&&gi(e,t,Ar)},Vn.hasIn=Tu,Vn.head=Yi,Vn.identity=il,Vn.includes=function(e,t,n,r){e=Ya(e)?e:Uu(e),n=n&&!r?mu(n):0;var o=e.length;return n<0&&(n=Yt(o+n,0)),cu(e)?n<=o&&e.indexOf(t,n)>-1:!!o&&Wt(e,t,n)>-1},Vn.indexOf=function(e,t,n){var r=null==e?0:e.length;if(!r)return-1;var o=null==n?0:mu(n);return o<0&&(o=Yt(r+o,0)),Wt(e,t,o)},Vn.inRange=function(e,t,n){return t=hu(t),n===o?(n=t,t=0):n=hu(n),function(e,t,n){return e>=wn(t,n)&&e=-9007199254740991&&e<=v},Vn.isSet=lu,Vn.isString=cu,Vn.isSymbol=su,Vn.isTypedArray=fu,Vn.isUndefined=function(e){return e===o},Vn.isWeakMap=function(e){return ru(e)&&mi(e)==O},Vn.isWeakSet=function(e){return ru(e)&&"[object WeakSet]"==kr(e)},Vn.join=function(e,t){return null==e?"":wt.call(e,t)},Vn.kebabCase=qu,Vn.last=Qi,Vn.lastIndexOf=function(e,t,n){var r=null==e?0:e.length;if(!r)return-1;var i=r;return n!==o&&(i=(i=mu(n))<0?Yt(r+i,0):wn(i,r-1)),t===t?function(e,t,n){for(var r=n+1;r--;)if(e[r]===t)return r;return r}(e,t,i):Ut(e,Kt,i,!0)},Vn.lowerCase=Gu,Vn.lowerFirst=Yu,Vn.lt=du,Vn.lte=pu,Vn.max=function(e){return e&&e.length?mr(e,il,Dr):o},Vn.maxBy=function(e,t){return e&&e.length?mr(e,si(t,2),Dr):o},Vn.mean=function(e){return qt(e,il)},Vn.meanBy=function(e,t){return qt(e,si(t,2))},Vn.min=function(e){return e&&e.length?mr(e,il,Br):o},Vn.minBy=function(e,t){return 
e&&e.length?mr(e,si(t,2),Br):o},Vn.stubArray=gl,Vn.stubFalse=yl,Vn.stubObject=function(){return{}},Vn.stubString=function(){return""},Vn.stubTrue=function(){return!0},Vn.multiply=Cl,Vn.nth=function(e,t){return e&&e.length?Hr(e,mu(t)):o},Vn.noConflict=function(){return ht._===this&&(ht._=ze),this},Vn.noop=sl,Vn.now=Aa,Vn.pad=function(e,t,n){e=wu(e);var r=(t=mu(t))?hn(e):0;if(!t||r>=t)return e;var o=(t-r)/2;return qo(vt(o),n)+e+qo(pt(o),n)},Vn.padEnd=function(e,t,n){e=wu(e);var r=(t=mu(t))?hn(e):0;return t&&rt){var r=e;e=t,t=r}if(n||e%1||t%1){var i=Cn();return wn(e+i*(t-e+ft("1e-"+((i+"").length-1))),t)}return Xr(e,t)},Vn.reduce=function(e,t,n){var r=qa(e)?jt:Xt,o=arguments.length<3;return r(e,si(t,4),n,o,pr)},Vn.reduceRight=function(e,t,n){var r=qa(e)?It:Xt,o=arguments.length<3;return r(e,si(t,4),n,o,vr)},Vn.repeat=function(e,t,n){return t=(n?Ei(e,t,n):t===o)?1:mu(t),Zr(wu(e),t)},Vn.replace=function(){var e=arguments,t=wu(e[0]);return e.length<3?t:t.replace(e[1],e[2])},Vn.result=function(e,t,n){var r=-1,i=(t=Eo(t,e)).length;for(i||(i=1,e=o);++rv)return[];var n=m,r=wn(e,m);t=si(t),e-=m;for(var o=$t(r,t);++n=a)return e;var l=n-hn(r);if(l<1)return r;var c=u?Co(u,0,l).join(""):e.slice(0,l);if(i===o)return c+r;if(u&&(l+=c.length-l),uu(i)){if(e.slice(l).search(i)){var s,f=c;for(i.global||(i=De(i.source,wu(he.exec(i))+"g")),i.lastIndex=0;s=i.exec(f);)var d=s.index;c=c.slice(0,d===o?l:d)}}else if(e.indexOf(so(i),l)!=l){var p=c.lastIndexOf(i);p>-1&&(c=c.slice(0,p))}return c+r},Vn.unescape=function(e){return(e=wu(e))&&Z.test(e)?e.replace(Y,yn):e},Vn.uniqueId=function(e){var t=++Me;return wu(e)+t},Vn.upperCase=$u,Vn.upperFirst=Qu,Vn.each=wa,Vn.eachRight=Ea,Vn.first=Yi,cl(Vn,function(){var e={};return Er(Vn,(function(t,n){Le.call(Vn.prototype,n)||(e[n]=t)})),e}(),{chain:!1}),Vn.VERSION="4.17.21",Tt(["bind","bindKey","curry","curryRight","partial","partialRight"],(function(e){Vn[e].placeholder=Vn})),Tt(["drop","take"],(function(e,t){Kn.prototype[e]=function(n){n=n===o?1:Yt(mu(n),0);var r=this.__filtered__&&!t?new Kn(this):this.clone();return r.__filtered__?r.__takeCount__=wn(n,r.__takeCount__):r.__views__.push({size:wn(n,m),type:e+(r.__dir__<0?"Right":"")}),r},Kn.prototype[e+"Right"]=function(t){return this.reverse()[e](t).reverse()}})),Tt(["filter","map","takeWhile"],(function(e,t){var n=t+1,r=1==n||3==n;Kn.prototype[e]=function(e){var t=this.clone();return t.__iteratees__.push({iteratee:si(e,3),type:n}),t.__filtered__=t.__filtered__||r,t}})),Tt(["head","last"],(function(e,t){var n="take"+(t?"Right":"");Kn.prototype[e]=function(){return this[n](1).value()[0]}})),Tt(["initial","tail"],(function(e,t){var n="drop"+(t?"":"Right");Kn.prototype[e]=function(){return this.__filtered__?new Kn(this):this[n](1)}})),Kn.prototype.compact=function(){return this.filter(il)},Kn.prototype.find=function(e){return this.filter(e).head()},Kn.prototype.findLast=function(e){return this.reverse().find(e)},Kn.prototype.invokeMap=$r((function(e,t){return"function"==typeof e?new Kn(this):this.map((function(n){return Fr(n,e,t)}))})),Kn.prototype.reject=function(e){return this.filter(ja(si(e)))},Kn.prototype.slice=function(e,t){e=mu(e);var n=this;return n.__filtered__&&(e>0||t<0)?new Kn(n):(e<0?n=n.takeRight(-e):e&&(n=n.drop(e)),t!==o&&(n=(t=mu(t))<0?n.dropRight(-t):n.take(t-e)),n)},Kn.prototype.takeRightWhile=function(e){return this.reverse().takeWhile(e).reverse()},Kn.prototype.toArray=function(){return this.take(m)},Er(Kn.prototype,(function(e,t){var 
n=/^(?:filter|find|map|reject)|While$/.test(t),r=/^(?:head|last)$/.test(t),i=Vn[r?"take"+("last"==t?"Right":""):t],a=r||/^find/.test(t);i&&(Vn.prototype[t]=function(){var t=this.__wrapped__,u=r?[1]:arguments,l=t instanceof Kn,c=u[0],s=l||qa(t),f=function(e){var t=i.apply(Vn,Mt([e],u));return r&&d?t[0]:t};s&&n&&"function"==typeof c&&1!=c.length&&(l=s=!1);var d=this.__chain__,p=!!this.__actions__.length,v=a&&!d,h=l&&!p;if(!a&&s){t=h?t:new Kn(this);var m=e.apply(t,u);return m.__actions__.push({func:ha,args:[f],thisArg:o}),new Hn(m,d)}return v&&h?e.apply(this,u):(m=this.thru(f),v?r?m.value()[0]:m.value():m)})})),Tt(["pop","push","shift","sort","splice","unshift"],(function(e){var t=Re[e],n=/^(?:push|sort|unshift)$/.test(e)?"tap":"thru",r=/^(?:pop|shift)$/.test(e);Vn.prototype[e]=function(){var e=arguments;if(r&&!this.__chain__){var o=this.value();return t.apply(qa(o)?o:[],e)}return this[n]((function(n){return t.apply(qa(n)?n:[],e)}))}})),Er(Kn.prototype,(function(e,t){var n=Vn[t];if(n){var r=n.name+"";Le.call(Pn,r)||(Pn[r]=[]),Pn[r].push({name:t,func:n})}})),Pn[Uo(o,2).name]=[{name:"wrapper",func:o}],Kn.prototype.clone=function(){var e=new Kn(this.__wrapped__);return e.__actions__=Fo(this.__actions__),e.__dir__=this.__dir__,e.__filtered__=this.__filtered__,e.__iteratees__=Fo(this.__iteratees__),e.__takeCount__=this.__takeCount__,e.__views__=Fo(this.__views__),e},Kn.prototype.reverse=function(){if(this.__filtered__){var e=new Kn(this);e.__dir__=-1,e.__filtered__=!0}else(e=this.clone()).__dir__*=-1;return e},Kn.prototype.value=function(){var e=this.__wrapped__.value(),t=this.__dir__,n=qa(e),r=t<0,o=n?e.length:0,i=function(e,t,n){var r=-1,o=n.length;for(;++r=this.__values__.length;return{done:e,value:e?o:this.__values__[this.__index__++]}},Vn.prototype.plant=function(e){for(var t,n=this;n instanceof Wn;){var r=Vi(n);r.__index__=0,r.__values__=o,t?i.__wrapped__=r:t=r;var i=r;n=n.__wrapped__}return i.__wrapped__=e,t},Vn.prototype.reverse=function(){var e=this.__wrapped__;if(e instanceof Kn){var t=e;return this.__actions__.length&&(t=new Kn(this)),(t=t.reverse()).__actions__.push({func:ha,args:[na],thisArg:o}),new Hn(t,this.__chain__)}return this.thru(na)},Vn.prototype.toJSON=Vn.prototype.valueOf=Vn.prototype.value=function(){return mo(this.__wrapped__,this.__actions__)},Vn.prototype.first=Vn.prototype.head,$e&&(Vn.prototype[$e]=function(){return this}),Vn}();ht._=bn,(r=function(){return bn}.call(t,n,t,e))===o||(e.exports=r)}.call(this)},454:function(e){"use strict";var t=Object.getOwnPropertySymbols,n=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;function o(e){if(null===e||void 0===e)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(e)}e.exports=function(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(t).map((function(e){return t[e]})).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach((function(e){r[e]=e})),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(o){return!1}}()?Object.assign:function(e,i){for(var a,u,l=o(e),c=1;c