replaced iopaint with inpaint

Author: root
Date: 2024-08-20 22:09:16 +02:00
Parent: c327e735cb
Commit: f1e5deba0f

56 changed files with 128 additions and 128 deletions
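The rename itself is mechanical: every `iopaint` package reference becomes `inpaint`, with no other code changes (128 additions mirrored by 128 deletions). As an illustration only — this is not the script used for this commit, and the root path, file glob, and helper name are assumptions — a rewrite like this could be done with a small Python helper:

# Illustrative sketch: rewrite "iopaint" references to "inpaint" in .py files.
# Assumptions: run from the repository root; only whole-word occurrences change.
import re
from pathlib import Path

PACKAGE_RE = re.compile(r"\biopaint\b")

def rename_package(root: str = ".") -> int:
    """Rewrite matching files in place and return how many were changed."""
    changed = 0
    for path in Path(root).rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        new_text = PACKAGE_RE.sub("inpaint", text)
        if new_text != text:
            path.write_text(new_text, encoding="utf-8")
            changed += 1
    return changed

if __name__ == "__main__":
    print(f"rewrote {rename_package()} files")

For the rewritten imports to actually resolve, the package directory itself would also have to be renamed (iopaint/ to inpaint/); a text rewrite alone does not do that.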

View File

@@ -4,7 +4,7 @@ import torch
 import numpy as np
 from tqdm import tqdm
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
 class DDIMSampler(object):

View File

@@ -14,7 +14,7 @@ from tqdm import tqdm
 from torchvision.utils import make_grid
 from omegaconf import ListConfig
-from iopaint.model.anytext.ldm.util import (
+from inpaint.model.anytext.ldm.util import (
     log_txt_as_img,
     exists,
     default,
@@ -24,18 +24,18 @@ from iopaint.model.anytext.ldm.util import (
     count_params,
     instantiate_from_config,
 )
-from iopaint.model.anytext.ldm.modules.ema import LitEma
-from iopaint.model.anytext.ldm.modules.distributions.distributions import (
+from inpaint.model.anytext.ldm.modules.ema import LitEma
+from inpaint.model.anytext.ldm.modules.distributions.distributions import (
     normal_kl,
     DiagonalGaussianDistribution,
 )
-from iopaint.model.anytext.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import (
+from inpaint.model.anytext.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import (
     make_beta_schedule,
     extract_into_tensor,
     noise_like,
 )
-from iopaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler
+from inpaint.model.anytext.ldm.models.diffusion.ddim import DDIMSampler
 import cv2

View File

@@ -5,8 +5,8 @@ import numpy as np
 from tqdm import tqdm
 from functools import partial
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
-from iopaint.model.anytext.ldm.models.diffusion.sampling_util import norm_thresholding
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
+from inpaint.model.anytext.ldm.models.diffusion.sampling_util import norm_thresholding
 class PLMSSampler(object):

View File

@@ -6,7 +6,7 @@ from torch import nn, einsum
 from einops import rearrange, repeat
 from typing import Optional, Any
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import checkpoint
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import checkpoint
 # CrossAttn precision handling

View File

@@ -6,7 +6,7 @@ import torch as th
 import torch.nn as nn
 import torch.nn.functional as F
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import (
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import (
     checkpoint,
     conv_nd,
     linear,
@@ -15,8 +15,8 @@ from iopaint.model.anytext.ldm.modules.diffusionmodules.util import (
     normalization,
     timestep_embedding,
 )
-from iopaint.model.anytext.ldm.modules.attention import SpatialTransformer
-from iopaint.model.anytext.ldm.util import exists
+from inpaint.model.anytext.ldm.modules.attention import SpatialTransformer
+from inpaint.model.anytext.ldm.util import exists
 # dummy replace

View File

@@ -3,8 +3,8 @@ import torch.nn as nn
 import numpy as np
 from functools import partial
-from iopaint.model.anytext.ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
-from iopaint.model.anytext.ldm.util import default
+from inpaint.model.anytext.ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
+from inpaint.model.anytext.ldm.util import default
 class AbstractLowScaleModel(nn.Module):

View File

@@ -15,7 +15,7 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
-from iopaint.model.anytext.ldm.util import instantiate_from_config
+from inpaint.model.anytext.ldm.util import instantiate_from_config
 def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):

View File

@@ -11,7 +11,7 @@ from transformers import (
     CLIPVisionModelWithProjection,
 )
-from iopaint.model.anytext.ldm.util import count_params
+from inpaint.model.anytext.ldm.util import count_params
 def _expand_mask(mask, dtype, tgt_len=None):

View File

@@ -8,7 +8,7 @@ seed = 66273235
 # seed_everything(seed)
 pipe = AnyTextPipeline(
-    ckpt_path="/Users/cwq/code/github/IOPaint/iopaint/model/anytext/anytext_v1.1_fp16.ckpt",
+    ckpt_path="/Users/cwq/code/github/IOPaint/inpaint/model/anytext/anytext_v1.1_fp16.ckpt",
     font_path="/Users/cwq/code/github/AnyText/anytext/font/SourceHanSansSC-Medium.otf",
     use_fp16=False,
     device="mps",

View File

@@ -6,13 +6,13 @@ import torch
 import numpy as np
 from loguru import logger
-from iopaint.helper import (
+from inpaint.helper import (
     boxes_from_mask,
     resize_max_size,
     pad_img_to_modulo,
     switch_mps_device,
 )
-from iopaint.schema import InpaintRequest, HDStrategy, SDSampler
+from inpaint.schema import InpaintRequest, HDStrategy, SDSampler
 from .helper.g_diffuser_bot import expand_image
 from .utils import get_scheduler

View File

@@ -3,7 +3,7 @@ import cv2
 import torch
 from diffusers import ControlNetModel
 from loguru import logger
-from iopaint.schema import InpaintRequest, ModelType
+from inpaint.schema import InpaintRequest, ModelType
 from .base import DiffusionInpaintModel
 from .helper.controlnet_preprocess import (

View File

@@ -6,9 +6,9 @@ import torch
 import numpy as np
 import torch.fft as fft
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
-from iopaint.helper import (
+from inpaint.helper import (
     load_model,
     get_cache_path_by_url,
     norm_img,

View File

@@ -4,7 +4,7 @@ import cv2
 from PIL import Image
 import numpy as np
-from iopaint.helper import pad_img_to_modulo
+from inpaint.helper import pad_img_to_modulo
 def make_canny_control_image(image: np.ndarray) -> Image:

View File

@@ -43,7 +43,7 @@ if __name__ == "__main__":
     from pathlib import Path
     current_dir = Path(__file__).parent.absolute().resolve()
-    image_path = "/Users/cwq/code/github/IOPaint/iopaint/tests/bunny.jpeg"
+    image_path = "/Users/cwq/code/github/IOPaint/inpaint/tests/bunny.jpeg"
     init_image = cv2.imread(str(image_path))
     init_image, mask_image = expand_image(
         init_image,

View File

@@ -3,9 +3,9 @@ import cv2
 import torch
 from loguru import logger
-from iopaint.const import INSTRUCT_PIX2PIX_NAME
+from inpaint.const import INSTRUCT_PIX2PIX_NAME
 from .base import DiffusionInpaintModel
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 from .utils import get_torch_dtype, enable_low_mem, is_local_files_only

View File

@@ -3,9 +3,9 @@ import cv2
 import numpy as np
 import torch
-from iopaint.const import KANDINSKY22_NAME
+from inpaint.const import KANDINSKY22_NAME
 from .base import DiffusionInpaintModel
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 from .utils import get_torch_dtype, enable_low_mem, is_local_files_only

View File

@@ -4,13 +4,13 @@ import cv2
 import numpy as np
 import torch
-from iopaint.helper import (
+from inpaint.helper import (
     norm_img,
     get_cache_path_by_url,
     load_jit_model,
     download_model,
 )
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 from .base import InpaintModel
 LAMA_MODEL_URL = os.environ.get(

View File

@@ -7,11 +7,11 @@ from loguru import logger
 from .base import InpaintModel
 from .ddim_sampler import DDIMSampler
 from .plms_sampler import PLMSSampler
-from iopaint.schema import InpaintRequest, LDMSampler
+from inpaint.schema import InpaintRequest, LDMSampler
 torch.manual_seed(42)
 import torch.nn as nn
-from iopaint.helper import (
+from inpaint.helper import (
     download_model,
     norm_img,
     get_cache_path_by_url,

View File

@@ -7,9 +7,9 @@ import torch
 import time
 from loguru import logger
-from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
+from inpaint.helper import get_cache_path_by_url, load_jit_model, download_model
 from .base import InpaintModel
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 MANGA_INPAINTOR_MODEL_URL = os.environ.get(

View File

@@ -8,13 +8,13 @@ import torch.nn as nn
 import torch.nn.functional as F
 import torch.utils.checkpoint as checkpoint
-from iopaint.helper import (
+from inpaint.helper import (
     load_model,
     get_cache_path_by_url,
     norm_img,
     download_model,
 )
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 from .base import InpaintModel
 from .utils import (
     setup_filter,

View File

@@ -3,7 +3,7 @@ import os
 import cv2
 import torch
-from iopaint.helper import (
+from inpaint.helper import (
     load_jit_model,
     download_model,
     get_cache_path_by_url,
@@ -12,7 +12,7 @@ from iopaint.helper import (
     norm_img,
 )
 from .base import InpaintModel
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 MIGAN_MODEL_URL = os.environ.get(
     "MIGAN_MODEL_URL",

View File

@@ -1,6 +1,6 @@
 import cv2
 from .base import InpaintModel
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA}

View File

@@ -4,9 +4,9 @@ import cv2
 import torch
 from loguru import logger
-from iopaint.helper import decode_base64_to_image
+from inpaint.helper import decode_base64_to_image
 from .base import DiffusionInpaintModel
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 from .utils import get_torch_dtype, enable_low_mem, is_local_files_only

View File

@@ -12,7 +12,7 @@ from ..utils import (
     enable_low_mem,
     is_local_files_only,
 )
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 from .powerpaint_tokenizer import add_task_to_prompt
 from ...const import POWERPAINT_NAME

View File

@@ -3,7 +3,7 @@ from itertools import chain
 import PIL.Image
 import cv2
 import torch
-from iopaint.model.original_sd_configs import get_config_files
+from inpaint.model.original_sd_configs import get_config_files
 from loguru import logger
 from transformers import CLIPTextModel, CLIPTokenizer
 import numpy as np
@@ -17,7 +17,7 @@ from ..utils import (
     handle_from_pretrained_exceptions,
 )
 from .powerpaint_tokenizer import task_to_prompt
-from iopaint.schema import InpaintRequest, ModelType
+from inpaint.schema import InpaintRequest, ModelType
 from .v2.BrushNet_CA import BrushNetModel
 from .v2.unet_2d_condition import UNet2DConditionModel_forward
 from .v2.unet_2d_blocks import (

View File

@@ -3,7 +3,7 @@ import random
 from typing import Any, List, Union
 from transformers import CLIPTokenizer
-from iopaint.schema import PowerPaintTask
+from inpaint.schema import PowerPaintTask
 def add_task_to_prompt(prompt, negative_prompt, task: PowerPaintTask):

View File

@@ -12,7 +12,7 @@ from .utils import (
     enable_low_mem,
     is_local_files_only,
 )
-from iopaint.schema import InpaintRequest, ModelType
+from inpaint.schema import InpaintRequest, ModelType
 class SD(DiffusionInpaintModel):

View File

@@ -5,8 +5,8 @@ import cv2
 import torch
 import torch.nn.functional as F
-from iopaint.helper import get_cache_path_by_url, load_jit_model, download_model
-from iopaint.schema import InpaintRequest
+from inpaint.helper import get_cache_path_by_url, load_jit_model, download_model
+from inpaint.schema import InpaintRequest
 import numpy as np
 from .base import InpaintModel

View File

@@ -5,9 +5,9 @@ import torch.nn.functional as F
 import numpy as np
 from PIL import Image
-from iopaint.helper import load_model
-from iopaint.plugins.base_plugin import BasePlugin
-from iopaint.schema import RunPluginRequest
+from inpaint.helper import load_model
+from inpaint.plugins.base_plugin import BasePlugin
+from inpaint.schema import RunPluginRequest
 class REBNCONV(nn.Module):

View File

@@ -1,7 +1,7 @@
 from loguru import logger
 import numpy as np
-from iopaint.schema import RunPluginRequest
+from inpaint.schema import RunPluginRequest
 class BasePlugin:

View File

@@ -4,7 +4,7 @@ import torch
 from torch import nn
 from torch.nn import functional as F
-from iopaint.plugins.basicsr.arch_util import default_init_weights
+from inpaint.plugins.basicsr.arch_util import default_init_weights
 class NormStyleCode(nn.Module):

View File

@@ -2,9 +2,9 @@ import cv2
 import numpy as np
 from loguru import logger
-from iopaint.helper import download_model
-from iopaint.plugins.base_plugin import BasePlugin
-from iopaint.schema import RunPluginRequest
+from inpaint.helper import download_model
+from inpaint.plugins.base_plugin import BasePlugin
+from inpaint.schema import RunPluginRequest
 class GFPGANPlugin(BasePlugin):

View File

@@ -5,13 +5,13 @@ import numpy as np
 import torch
 from loguru import logger
-from iopaint.helper import download_model
-from iopaint.plugins.base_plugin import BasePlugin
-from iopaint.plugins.segment_anything import SamPredictor, sam_model_registry
-from iopaint.plugins.segment_anything.predictor_hq import SamHQPredictor
-from iopaint.plugins.segment_anything2.build_sam import build_sam2
-from iopaint.plugins.segment_anything2.sam2_image_predictor import SAM2ImagePredictor
-from iopaint.schema import RunPluginRequest
+from inpaint.helper import download_model
+from inpaint.plugins.base_plugin import BasePlugin
+from inpaint.plugins.segment_anything import SamPredictor, sam_model_registry
+from inpaint.plugins.segment_anything.predictor_hq import SamHQPredictor
+from inpaint.plugins.segment_anything2.build_sam import build_sam2
+from inpaint.plugins.segment_anything2.sam2_image_predictor import SAM2ImagePredictor
+from inpaint.schema import RunPluginRequest
 # 从小到大
 SEGMENT_ANYTHING_MODELS = {

View File

@@ -7,9 +7,9 @@ from torch import nn
 import torch.nn.functional as F
 from loguru import logger
-from iopaint.helper import download_model
-from iopaint.plugins.base_plugin import BasePlugin
-from iopaint.schema import RunPluginRequest, RealESRGANModel
+from inpaint.helper import download_model
+from inpaint.plugins.base_plugin import BasePlugin
+from inpaint.schema import RunPluginRequest, RealESRGANModel
 class RealESRGANer:

View File

@@ -4,8 +4,8 @@ import numpy as np
 from loguru import logger
 from torch.hub import get_dir
-from iopaint.plugins.base_plugin import BasePlugin
-from iopaint.schema import RunPluginRequest, RemoveBGModel
+from inpaint.plugins.base_plugin import BasePlugin
+from inpaint.schema import RunPluginRequest, RemoveBGModel
 class RemoveBG(BasePlugin):
@@ -25,7 +25,7 @@ class RemoveBG(BasePlugin):
     def _init_session(self, model_name: str):
         if model_name == RemoveBGModel.briaai_rmbg_1_4:
-            from iopaint.plugins.briarmbg import (
+            from inpaint.plugins.briarmbg import (
                 create_briarmbg_session,
                 briarmbg_process,
             )

View File

@@ -2,9 +2,9 @@ import cv2
 import numpy as np
 from loguru import logger
-from iopaint.helper import download_model
-from iopaint.plugins.base_plugin import BasePlugin
-from iopaint.schema import RunPluginRequest
+from inpaint.helper import download_model
+from inpaint.plugins.base_plugin import BasePlugin
+from inpaint.schema import RunPluginRequest
 class RestoreFormerPlugin(BasePlugin):

View File

@@ -8,7 +8,7 @@ import torch
 from functools import partial
-from iopaint.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
+from inpaint.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
 from .modeling import (
     ImageEncoderViT,

View File

@@ -28,7 +28,7 @@ _CANDIDATES = [
     "transformers",
     "opencv-python",
     "accelerate",
-    "iopaint",
+    "inpaint",
     "rembg",
 ]
# Check once at runtime

View File

@@ -1,6 +1,6 @@
 import cv2
-from iopaint.helper import adjust_mask
-from iopaint.tests.utils import current_dir, save_dir
+from inpaint.helper import adjust_mask
+from inpaint.tests.utils import current_dir, save_dir
 mask_p = current_dir / "overture-creations-5sI6fQgYIuo_mask.png"

View File

@@ -1,6 +1,6 @@
 import os
-from iopaint.tests.utils import check_device, get_config, assert_equal
+from inpaint.tests.utils import check_device, get_config, assert_equal
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path
@@ -8,8 +8,8 @@ from pathlib import Path
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy
 current_dir = Path(__file__).parent.absolute().resolve()
 save_dir = current_dir / "result"

View File

@@ -1,7 +1,7 @@
 import os
-from iopaint.const import SD_BRUSHNET_CHOICES
-from iopaint.tests.utils import check_device, get_config, assert_equal
+from inpaint.const import SD_BRUSHNET_CHOICES
+from inpaint.tests.utils import check_device, get_config, assert_equal
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path
@@ -9,8 +9,8 @@ from pathlib import Path
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy, SDSampler, PowerPaintTask
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy, SDSampler, PowerPaintTask
 current_dir = Path(__file__).parent.absolute().resolve()
 save_dir = current_dir / "result"

View File

@@ -1,7 +1,7 @@
 import os
-from iopaint.const import SD_CONTROLNET_CHOICES
-from iopaint.tests.utils import current_dir, check_device, get_config, assert_equal
+from inpaint.const import SD_CONTROLNET_CHOICES
+from inpaint.tests.utils import current_dir, check_device, get_config, assert_equal
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path
@@ -9,8 +9,8 @@ from pathlib import Path
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy, SDSampler
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy, SDSampler
 model_name = "runwayml/stable-diffusion-inpainting"

View File

@@ -3,9 +3,9 @@ from pathlib import Path
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy
-from iopaint.tests.utils import get_config, check_device, assert_equal, current_dir
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy
+from inpaint.tests.utils import get_config, check_device, assert_equal, current_dir
 model_name = "timbrooks/instruct-pix2pix"

View File

@@ -1,5 +1,5 @@
-from iopaint.helper import load_img
-from iopaint.tests.utils import current_dir
+from inpaint.helper import load_img
+from inpaint.tests.utils import current_dir
 png_img_p = current_dir / "image.png"
 jpg_img_p = current_dir / "bunny.jpeg"

View File

@@ -2,15 +2,15 @@ import os
 from loguru import logger
-from iopaint.tests.utils import check_device, get_config, assert_equal, current_dir
+from inpaint.tests.utils import check_device, get_config, assert_equal, current_dir
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy, SDSampler
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy, SDSampler
 @pytest.mark.parametrize("device", ["cuda", "mps"])

View File

@@ -1,9 +1,9 @@
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import SDSampler, HDStrategy
-from iopaint.tests.utils import check_device, get_config, assert_equal, current_dir
+from inpaint.model_manager import ModelManager
+from inpaint.schema import SDSampler, HDStrategy
+from inpaint.tests.utils import check_device, get_config, assert_equal, current_dir
 @pytest.mark.parametrize("device", ["cuda", "mps"])

View File

@@ -1,9 +1,9 @@
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy, LDMSampler
-from iopaint.tests.utils import assert_equal, get_config, current_dir, check_device
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy, LDMSampler
+from inpaint.tests.utils import assert_equal, get_config, current_dir, check_device
 @pytest.mark.parametrize("device", ["cuda", "mps", "cpu"])

View File

@@ -1,6 +1,6 @@
 def test_load_model():
-    from iopaint.plugins import InteractiveSeg
-    from iopaint.model_manager import ModelManager
+    from inpaint.plugins import InteractiveSeg
+    from inpaint.model_manager import ModelManager
     interactive_seg_model = InteractiveSeg("vit_l", "cpu")

View File

@@ -1,12 +1,12 @@
 import os
-from iopaint.schema import InpaintRequest
+from inpaint.schema import InpaintRequest
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 import torch
-from iopaint.model_manager import ModelManager
+from inpaint.model_manager import ModelManager
 def test_model_switch():

View File

@@ -1,15 +1,15 @@
 import os
-from iopaint.tests.utils import current_dir, check_device
+from inpaint.tests.utils import current_dir, check_device
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import SDSampler
-from iopaint.tests.test_model import get_config, assert_equal
+from inpaint.model_manager import ModelManager
+from inpaint.schema import SDSampler
+from inpaint.tests.test_model import get_config, assert_equal
 @pytest.mark.parametrize("name", ["runwayml/stable-diffusion-inpainting"])

View File

@@ -1,11 +1,11 @@
 import cv2
 import pytest
 from PIL import Image
-from iopaint.helper import encode_pil_to_base64
+from inpaint.helper import encode_pil_to_base64
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy
-from iopaint.tests.utils import (
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy
+from inpaint.tests.utils import (
     current_dir,
     get_config,
     get_data,

View File

@@ -1,17 +1,17 @@
 import os
 from PIL import Image
-from iopaint.helper import encode_pil_to_base64, gen_frontend_mask
-from iopaint.plugins.anime_seg import AnimeSeg
-from iopaint.schema import RunPluginRequest, RemoveBGModel, InteractiveSegModel
-from iopaint.tests.utils import check_device, current_dir, save_dir
+from inpaint.helper import encode_pil_to_base64, gen_frontend_mask
+from inpaint.plugins.anime_seg import AnimeSeg
+from inpaint.schema import RunPluginRequest, RemoveBGModel, InteractiveSegModel
+from inpaint.tests.utils import check_device, current_dir, save_dir
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 import cv2
 import pytest
-from iopaint.plugins import (
+from inpaint.plugins import (
     RemoveBG,
     RealESRGANUpscaler,
     GFPGANPlugin,

View File

@@ -5,7 +5,7 @@ from typing import List
 from PIL import Image
-from iopaint.helper import pil_to_bytes, load_img
+from inpaint.helper import pil_to_bytes, load_img
 current_dir = Path(__file__).parent.absolute().resolve()

View File

@@ -2,7 +2,7 @@ import os
 from loguru import logger
-from iopaint.tests.utils import check_device, get_config, assert_equal
+from inpaint.tests.utils import check_device, get_config, assert_equal
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 from pathlib import Path
@@ -10,8 +10,8 @@ from pathlib import Path
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy, SDSampler
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy, SDSampler
 current_dir = Path(__file__).parent.absolute().resolve()
 save_dir = current_dir / "result"

View File

@@ -1,15 +1,15 @@
 import os
-from iopaint.tests.utils import check_device, current_dir
+from inpaint.tests.utils import check_device, current_dir
 os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 import pytest
 import torch
-from iopaint.model_manager import ModelManager
-from iopaint.schema import HDStrategy, SDSampler
-from iopaint.tests.test_model import get_config, assert_equal
+from inpaint.model_manager import ModelManager
+from inpaint.schema import HDStrategy, SDSampler
+from inpaint.tests.test_model import get_config, assert_equal
 @pytest.mark.parametrize("device", ["cuda", "mps"])

View File

@@ -3,7 +3,7 @@ import cv2
 import pytest
 import torch
-from iopaint.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
+from inpaint.schema import LDMSampler, HDStrategy, InpaintRequest, SDSampler
 import numpy as np
 current_dir = Path(__file__).parent.absolute().resolve()