This commit is contained in:
Qing 2024-01-05 16:40:06 +08:00
parent f88f3cbbb3
commit fd74b8556f
25 changed files with 45 additions and 45 deletions

View File

@@ -1,5 +1,5 @@
from pathlib import Path
from typing import Dict
from typing import Dict, Optional
import typer
from fastapi import FastAPI
@@ -114,8 +114,8 @@ def start(
device: Device = Option(Device.cpu),
gui: bool = Option(False, help=GUI_HELP),
disable_model_switch: bool = Option(False),
input: Path = Option(None, help=INPUT_HELP),
output_dir: Path = Option(
input: Optional[Path] = Option(None, help=INPUT_HELP),
output_dir: Optional[Path] = Option(
None, help=OUTPUT_DIR_HELP, dir_okay=True, file_okay=False
),
quality: int = Option(95, help=QUALITY_HELP),

View File

@@ -12,9 +12,9 @@ from iopaint.helper import (
pad_img_to_modulo,
switch_mps_device,
)
from iopaint.model.helper.g_diffuser_bot import expand_image
from iopaint.model.utils import get_scheduler
from iopaint.schema import InpaintRequest, HDStrategy, SDSampler
from .helper.g_diffuser_bot import expand_image
from .utils import get_scheduler
class InpaintModel:

View File

@@ -1,20 +1,19 @@
import PIL.Image
import cv2
import numpy as np
import torch
from diffusers import ControlNetModel, DiffusionPipeline
from diffusers import ControlNetModel
from loguru import logger
from iopaint.schema import InpaintRequest, ModelType
from iopaint.model.base import DiffusionInpaintModel
from iopaint.model.helper.controlnet_preprocess import (
from .base import DiffusionInpaintModel
from .helper.controlnet_preprocess import (
make_canny_control_image,
make_openpose_control_image,
make_depth_control_image,
make_inpaint_control_image,
)
from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
from iopaint.model.utils import get_scheduler, handle_from_pretrained_exceptions
from iopaint.schema import InpaintRequest, ModelType
from .helper.cpu_text_encoder import CPUTextEncoderWrapper
from .utils import get_scheduler, handle_from_pretrained_exceptions
class ControlNet(DiffusionInpaintModel):

View File

@@ -2,7 +2,7 @@ import torch
import numpy as np
from tqdm import tqdm
from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from .utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from loguru import logger

View File

@@ -16,11 +16,11 @@ from iopaint.helper import (
resize_max_size,
download_model,
)
from iopaint.model.base import InpaintModel
from .base import InpaintModel
from torch import conv2d, nn
import torch.nn.functional as F
from iopaint.model.utils import (
from .utils import (
setup_filter,
_parse_scaling,
_parse_padding,

View File

View File

@@ -1,5 +1,5 @@
import torch
from iopaint.model.utils import torch_gc
from ..utils import torch_gc
class CPUTextEncoderWrapper(torch.nn.Module):

View File

@@ -4,7 +4,7 @@ import torch
from loguru import logger
from iopaint.const import INSTRUCT_PIX2PIX_NAME
from iopaint.model.base import DiffusionInpaintModel
from .base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest

View File

@@ -4,7 +4,7 @@ import numpy as np
import torch
from iopaint.const import KANDINSKY22_NAME
from iopaint.model.base import DiffusionInpaintModel
from .base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest

View File

@@ -10,8 +10,8 @@ from iopaint.helper import (
load_jit_model,
download_model,
)
from iopaint.model.base import InpaintModel
from iopaint.schema import InpaintRequest
from .base import InpaintModel
LAMA_MODEL_URL = os.environ.get(
"LAMA_MODEL_URL",

View File

@@ -4,9 +4,9 @@ import numpy as np
import torch
from loguru import logger
from iopaint.model.base import InpaintModel
from iopaint.model.ddim_sampler import DDIMSampler
from iopaint.model.plms_sampler import PLMSSampler
from .base import InpaintModel
from .ddim_sampler import DDIMSampler
from .plms_sampler import PLMSSampler
from iopaint.schema import InpaintRequest, LDMSampler
torch.manual_seed(42)
@@ -17,7 +17,7 @@ from iopaint.helper import (
get_cache_path_by_url,
load_jit_model,
)
from iopaint.model.utils import (
from .utils import (
make_beta_schedule,
timestep_embedding,
)

View File

@@ -8,7 +8,7 @@ import time
from loguru import logger
from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
from iopaint.model.base import InpaintModel
from .base import InpaintModel
from iopaint.schema import InpaintRequest

View File

@@ -14,8 +14,9 @@ from iopaint.helper import (
norm_img,
download_model,
)
from iopaint.model.base import InpaintModel
from iopaint.model.utils import (
from iopaint.schema import InpaintRequest
from .base import InpaintModel
from .utils import (
setup_filter,
Conv2dLayer,
FullyConnectedLayer,
@@ -28,7 +29,6 @@ from iopaint.model.utils import (
normalize_2nd_moment,
set_seed,
)
from iopaint.schema import InpaintRequest
class ModulatedConv2d(nn.Module):

View File

@@ -11,7 +11,7 @@ from iopaint.helper import (
resize_max_size,
norm_img,
)
from iopaint.model.base import InpaintModel
from .base import InpaintModel
from iopaint.schema import InpaintRequest
MIGAN_MODEL_URL = os.environ.get(

View File

@@ -1,5 +1,5 @@
import cv2
from iopaint.model.base import InpaintModel
from .base import InpaintModel
from iopaint.schema import InpaintRequest
flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA}

View File

@@ -5,7 +5,7 @@ import torch
from loguru import logger
from iopaint.helper import decode_base64_to_image
from iopaint.model.base import DiffusionInpaintModel
from .base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest

View File

@@ -1,7 +1,7 @@
# From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py
import torch
import numpy as np
from iopaint.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from .utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from tqdm import tqdm

View File

@@ -4,9 +4,9 @@ import cv2
import torch
from loguru import logger
from iopaint.model.base import DiffusionInpaintModel
from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
from iopaint.model.utils import handle_from_pretrained_exceptions
from ..base import DiffusionInpaintModel
from ..helper.cpu_text_encoder import CPUTextEncoderWrapper
from ..utils import handle_from_pretrained_exceptions
from iopaint.schema import InpaintRequest
from .powerpaint_tokenizer import add_task_to_prompt
from ...const import POWERPAINT_NAME

View File

@@ -3,9 +3,9 @@ import cv2
import torch
from loguru import logger
from iopaint.model.base import DiffusionInpaintModel
from iopaint.model.helper.cpu_text_encoder import CPUTextEncoderWrapper
from iopaint.model.utils import handle_from_pretrained_exceptions
from .base import DiffusionInpaintModel
from .helper.cpu_text_encoder import CPUTextEncoderWrapper
from .utils import handle_from_pretrained_exceptions
from iopaint.schema import InpaintRequest, ModelType

View File

@@ -6,10 +6,11 @@ import torch
from diffusers import AutoencoderKL
from loguru import logger
from iopaint.model.base import DiffusionInpaintModel
from iopaint.model.utils import handle_from_pretrained_exceptions
from iopaint.schema import InpaintRequest, ModelType
from .base import DiffusionInpaintModel
from .utils import handle_from_pretrained_exceptions
class SDXL(DiffusionInpaintModel):
name = "diffusers/stable-diffusion-xl-1.0-inpainting-0.1"

View File

@@ -9,7 +9,7 @@ from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
from iopaint.schema import InpaintRequest
import numpy as np
from iopaint.model.base import InpaintModel
from .base import InpaintModel
ZITS_INPAINT_MODEL_URL = os.environ.get(
"ZITS_INPAINT_MODEL_URL",

View File

@@ -92,8 +92,8 @@ class ApiConfig(BaseModel):
device: Device
gui: bool
disable_model_switch: bool
input: Path
output_dir: Path
input: Optional[Path]
output_dir: Optional[Path]
quality: int
enable_interactive_seg: bool
interactive_seg_model: InteractiveSegModel

View File

@@ -21,7 +21,7 @@ def load_requirements():
# https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
setuptools.setup(
name="IOPaint",
version="1.0.0-beta.1",
version="1.0.0-beta.2",
author="PanicByte",
author_email="cwq1913@gmail.com",
description="Image inpainting, outpainting tool powered by SOTA AI Model",

View File

@ -3,7 +3,7 @@ import io from "socket.io-client"
import { Progress } from "./ui/progress"
import { useStore } from "@/lib/states"
export const API_ENDPOINT = import.meta.env.VITE_BACKEND
export const API_ENDPOINT = import.meta.env.DEV
? import.meta.env.VITE_BACKEND
: ""
const socket = io(API_ENDPOINT)

View File

@ -10,7 +10,7 @@ import { Settings } from "@/lib/states"
import { convertToBase64, srcToFile } from "@/lib/utils"
import axios from "axios"
export const API_ENDPOINT = import.meta.env.VITE_BACKEND
export const API_ENDPOINT = import.meta.env.DEV
? import.meta.env.VITE_BACKEND + "/api/v1"
: "/api/v1"