# IOPaint/lama_cleaner/const.py

import os
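
# Models that support inference on Apple's MPS (Metal Performance Shaders) backend.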
MPS_SUPPORT_MODELS = [
"instruct_pix2pix",
"sd1.5",
"anything4",
"realisticVision1.4",
"sd2",
"paint_by_example",
"controlnet",
]
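
# Default inpainting model and the full list of selectable models.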
DEFAULT_MODEL = "lama"
AVAILABLE_MODELS = [
"lama",
"ldm",
"zits",
"mat",
"fcf",
"sd1.5",
"anything4",
"realisticVision1.4",
"cv2",
"manga",
"sd2",
"paint_by_example",
"instruct_pix2pix",
"controlnet",
]
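
# Models built on the Stable Diffusion 1.5 architecture.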
SD15_MODELS = ["sd1.5", "anything4", "realisticVision1.4"]
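
# Supported compute devices.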
AVAILABLE_DEVICES = ["cuda", "cpu", "mps"]
DEFAULT_DEVICE = "cuda"
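
# Help texts for the command-line arguments.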
NO_HALF_HELP = """
Use the full-precision model.
If your generated results are always black or green, use this argument. (sd/paint_by_example)
"""
CPU_OFFLOAD_HELP = """
Offload all models to CPU, significantly reducing vRAM usage. (sd/paint_by_example)
"""
DISABLE_NSFW_HELP = """
Disable NSFW checker. (sd/paint_by_example)
"""
SD_CPU_TEXTENCODER_HELP = """
Run the Stable Diffusion text encoder model on CPU to save GPU memory.
"""
SD_CONTROLNET_HELP = """
Run the Stable Diffusion 1.5 inpainting model with the ControlNet canny model.
"""
LOCAL_FILES_ONLY_HELP = """
Use local files only; do not connect to the Hugging Face server. (sd/paint_by_example)
"""
ENABLE_XFORMERS_HELP = """
Enable xFormers optimizations. Requires the xformers package to be installed. See: https://github.com/facebookresearch/xformers (sd/paint_by_example)
"""
DEFAULT_MODEL_DIR = os.getenv(
"XDG_CACHE_HOME", os.path.join(os.path.expanduser("~"), ".cache")
)
MODEL_DIR_HELP = """
Model download directory (set via the XDG_CACHE_HOME environment variable); by default models are downloaded to ~/.cache
"""
OUTPUT_DIR_HELP = """
Result images will be saved to the output directory automatically without confirmation.
"""
INPUT_HELP = """
If the input is an image, it will be loaded by default.
If the input is a directory, you can browse and select images in the file manager.
"""
GUI_HELP = """
Launch Lama Cleaner as a desktop app
"""
NO_GUI_AUTO_CLOSE_HELP = """
Prevent the backend from closing automatically after the GUI window is closed.
"""