add installer web config

Qing 2023-01-20 16:52:38 +08:00
parent b35cffadbd
commit 1239ab047d
12 changed files with 297 additions and 209 deletions

lama_cleaner/const.py Normal file

@@ -0,0 +1,68 @@
import os

DEFAULT_MODEL = "lama"
AVAILABLE_MODELS = [
    "lama",
    "ldm",
    "zits",
    "mat",
    "fcf",
    "sd1.5",
    "cv2",
    "manga",
    "sd2",
    "paint_by_example",
]
AVAILABLE_DEVICES = ["cuda", "cpu", "mps"]
DEFAULT_DEVICE = "cuda"

NO_HALF_HELP = """
Use the full-precision model.
If your generated results are always black or green, use this argument. (sd/paint_by_example)
"""

CPU_OFFLOAD_HELP = """
Offload all models to CPU, significantly reducing VRAM usage. (sd/paint_by_example)
"""

DISABLE_NSFW_HELP = """
Disable the NSFW checker. (sd/paint_by_example)
"""

SD_CPU_TEXTENCODER_HELP = """
Run the Stable Diffusion text encoder on CPU to save GPU memory.
"""

LOCAL_FILES_ONLY_HELP = """
Use local files only; do not connect to the Hugging Face server. (sd/paint_by_example)
"""

ENABLE_XFORMERS_HELP = """
Enable xFormers optimizations. Requires the xformers package to be installed. See: https://github.com/facebookresearch/xformers (sd/paint_by_example)
"""

DEFAULT_MODEL_DIR = os.getenv(
    "XDG_CACHE_HOME",
    os.path.join(os.path.expanduser("~"), ".cache"),
)
MODEL_DIR_HELP = """
Model download directory (sets the XDG_CACHE_HOME environment variable); by default models are downloaded to ~/.cache.
"""

OUTPUT_DIR_HELP = """
Only required when --input is a directory. Result images are saved to the output directory automatically.
"""

INPUT_HELP = """
If the input is an image, it is loaded by default.
If the input is a directory, you can browse and select images in the file manager.
"""

GUI_HELP = """
Launch Lama Cleaner as a desktop app.
"""

NO_GUI_AUTO_CLOSE_HELP = """
Prevent the backend from closing automatically after the GUI window is closed.
"""

View File

@@ -5,19 +5,47 @@ from pathlib import Path
 from loguru import logger
+from lama_cleaner.const import AVAILABLE_MODELS, NO_HALF_HELP, CPU_OFFLOAD_HELP, DISABLE_NSFW_HELP, \
+    SD_CPU_TEXTENCODER_HELP, LOCAL_FILES_ONLY_HELP, AVAILABLE_DEVICES, ENABLE_XFORMERS_HELP, MODEL_DIR_HELP, \
+    OUTPUT_DIR_HELP, INPUT_HELP, GUI_HELP, DEFAULT_DEVICE, NO_GUI_AUTO_CLOSE_HELP, DEFAULT_MODEL_DIR
+from lama_cleaner.runtime import dump_environment_info
 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--host", default="127.0.0.1")
     parser.add_argument("--port", default=8080, type=int)
+    parser.add_argument("--config-installer", action="store_true",
+                        help="Open config web page, mainly for the Windows installer")
+    parser.add_argument("--load-installer-config", action="store_true",
+                        help="Load all cmd args from the installer config file")
+    parser.add_argument("--installer-config", default=None, help="Config file for the Windows installer")
+    parser.add_argument("--model", default="lama", choices=AVAILABLE_MODELS)
+    parser.add_argument("--no-half", action="store_true", help=NO_HALF_HELP)
+    parser.add_argument("--cpu-offload", action="store_true", help=CPU_OFFLOAD_HELP)
+    parser.add_argument("--disable-nsfw", action="store_true", help=DISABLE_NSFW_HELP)
+    parser.add_argument("--sd-cpu-textencoder", action="store_true", help=SD_CPU_TEXTENCODER_HELP)
+    parser.add_argument("--local-files-only", action="store_true", help=LOCAL_FILES_ONLY_HELP)
+    parser.add_argument("--enable-xformers", action="store_true", help=ENABLE_XFORMERS_HELP)
+    parser.add_argument("--device", default=DEFAULT_DEVICE, type=str, choices=AVAILABLE_DEVICES)
+    parser.add_argument("--gui", action="store_true", help=GUI_HELP)
+    parser.add_argument("--no-gui-auto-close", action="store_true", help=NO_GUI_AUTO_CLOSE_HELP)
     parser.add_argument(
-        "--model",
-        default="lama",
-        choices=["lama", "ldm", "zits", "mat", "fcf", "sd1.5", "cv2", "manga", "sd2", "paint_by_example"],
+        "--gui-size",
+        default=[1600, 1000],
+        nargs=2,
+        type=int,
+        help="Set window size for GUI",
     )
-    parser.add_argument("--no-half", action="store_true", help="sd/paint_by_example model no half precision")
-    parser.add_argument("--cpu-offload", action="store_true",
-                        help="sd/paint_by_example model, offloads all models to CPU, significantly reducing vRAM usage.")
+    parser.add_argument("--input", type=str, default=None, help=INPUT_HELP)
+    parser.add_argument("--output-dir", type=str, default=None, help=OUTPUT_DIR_HELP)
+    parser.add_argument("--model-dir", type=str, default=DEFAULT_MODEL_DIR, help=MODEL_DIR_HELP)
+    parser.add_argument("--disable-model-switch", action="store_true", help="Disable model switch in frontend")
+    parser.add_argument("--debug", action="store_true")
-    # useless args
-    parser.add_argument(
-        "--hf_access_token",
-        default="",
@@ -28,70 +56,48 @@ def parse_args():
-        action="store_true",
-        help="Disable Stable Diffusion NSFW checker",
-    )
-    parser.add_argument(
-        "--disable-nsfw",
-        action="store_true",
-        help="Disable Stable Diffusion/Paint By Example NSFW checker",
-    )
-    parser.add_argument(
-        "--sd-cpu-textencoder",
-        action="store_true",
-        help="Always run Stable Diffusion TextEncoder model on CPU",
-    )
-    parser.add_argument(
-        "--sd-run-local",
-        action="store_true",
-        help="SD model no more need token, use --local-files-only to set not connect to huggingface server",
-    )
-    parser.add_argument(
-        "--local-files-only",
-        action="store_true",
-        help="sd/paint_by_example model. Use local files only, not connect to huggingface server",
-    )
-    parser.add_argument(
-        "--sd-enable-xformers",
-        action="store_true",
-        help="Enable xFormers optimizations. Requires that xformers package has been installed. See: https://github.com/facebookresearch/xformers"
-    )
-    parser.add_argument(
-        "--enable-xformers",
-        action="store_true",
-        help="sd/paint_by_example model. Enable xFormers optimizations. Requires that xformers package has been installed. See: https://github.com/facebookresearch/xformers"
-    )
-    parser.add_argument("--device", default="cuda", type=str, choices=["cuda", "cpu", "mps"])
-    parser.add_argument("--gui", action="store_true", help="Launch as desktop app")
-    parser.add_argument(
-        "--gui-size",
-        default=[1600, 1000],
-        nargs=2,
-        type=int,
-        help="Set window size for GUI",
-    )
-    parser.add_argument(
-        "--input", type=str,
-        help="If input is image, it will be load by default. If input is directory, all images will be loaded to file manager"
-    )
-    parser.add_argument(
-        "--output-dir", type=str,
-        help="Only required when --input is directory. Output directory for all processed images"
-    )
-    parser.add_argument(
-        "--model-dir", type=str, default=None,
-        help="Model download directory (by setting XDG_CACHE_HOME environment variable), "
-             "by default model downloaded to ~/.cache"
-    )
-    parser.add_argument("--disable-model-switch", action="store_true", help="Disable model switch in frontend")
-    parser.add_argument("--debug", action="store_true")
     args = parser.parse_args()
+    # collect system info to help debug
+    dump_environment_info()
+    if args.config_installer:
+        if args.installer_config is None:
+            parser.error("--config-installer requires --installer-config to specify where to store the config file")
+        from lama_cleaner.web_config import main
+        logger.info("Launching installer web config page")
+        main(args.installer_config)
+        exit()
+    if args.load_installer_config:
+        from lama_cleaner.web_config import load_config
+        if args.installer_config and not os.path.exists(args.installer_config):
+            parser.error(f"args.installer_config={args.installer_config} does not exist")
+        logger.info(f"Loading installer config from {args.installer_config}")
+        _args = load_config(args.installer_config)
+        for k, v in vars(_args).items():
+            if k in vars(args):
+                setattr(args, k, v)
     if args.device == "cuda":
         import torch
         if torch.cuda.is_available() is False:
             parser.error(
                 "torch.cuda.is_available() is False, please use --device cpu or check your pytorch installation")
-    if args.model_dir is not None:
+    if args.model_dir and args.model_dir is not None:
         if os.path.isfile(args.model_dir):
             parser.error(f"invalid --model-dir: {args.model_dir} is a file")
@@ -101,7 +107,7 @@ def parse_args():
         os.environ["XDG_CACHE_HOME"] = args.model_dir
-    if args.input is not None:
+    if args.input and args.input is not None:
         if not os.path.exists(args.input):
             parser.error(f"invalid --input: {args.input} not exists")
         if os.path.isfile(args.input):
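A note on the installer-config override loop above (editor's sketch, not part of the commit): it works because argparse stores each --flag-name as the attribute flag_name, which matches the pydantic Config field names in web_config.py one for one.

import argparse

# "--no-gui-auto-close" is stored as args.no_gui_auto_close, the same name
# as the Config field no_gui_auto_close, so setattr(args, k, v) lines up.
p = argparse.ArgumentParser()
p.add_argument("--no-gui-auto-close", action="store_true")
args = p.parse_args(["--no-gui-auto-close"])
print(args.no_gui_auto_close)  # True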

lama_cleaner/runtime.py Normal file

@@ -0,0 +1,46 @@
# https://github.com/huggingface/huggingface_hub/blob/5a12851f54bf614be39614034ed3a9031922d297/src/huggingface_hub/utils/_runtime.py
import platform
import sys
from typing import Any, Dict

import packaging.version
from rich import print

_PY_VERSION: str = sys.version.split()[0].rstrip("+")
if packaging.version.Version(_PY_VERSION) < packaging.version.Version("3.8.0"):
    import importlib_metadata  # type: ignore
else:
    import importlib.metadata as importlib_metadata  # type: ignore

_package_versions = {}

_CANDIDATES = [
    "torch",
    "Pillow",
    "diffusers",
    "transformers",
    "opencv-python",
    "xformers",
    "accelerate",
    "lama-cleaner",
]

# Check once at runtime
for name in _CANDIDATES:
    _package_versions[name] = "N/A"
    try:
        _package_versions[name] = importlib_metadata.version(name)
    except importlib_metadata.PackageNotFoundError:
        pass


def dump_environment_info() -> Dict[str, str]:
    """Dump information about the machine to help debug issues."""
    # Generic machine info
    info: Dict[str, Any] = {
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
    }
    info.update(_package_versions)
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n")
    return info
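Usage sketch for dump_environment_info(): it prints one "- key: value" line per entry and returns the same mapping (values below are illustrative; actual output depends on the machine):

from lama_cleaner.runtime import dump_environment_info

info = dump_environment_info()
# Prints something like:
# - Platform: Linux-5.15.0-x86_64-with-glibc2.31
# - Python version: 3.10.9
# - torch: 1.13.1
# - xformers: N/A   (package not installed)
print(info["Python version"])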

View File

@@ -408,7 +408,8 @@ def main(args):
         from flaskwebgui import FlaskUI
         ui = FlaskUI(
-            app, width=app_width, height=app_height, host=args.host, port=args.port
+            app, width=app_width, height=app_height, host=args.host, port=args.port,
+            close_server_on_exit=not args.no_gui_auto_close
         )
         ui.run()
     else:
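The double negative in close_server_on_exit=not args.no_gui_auto_close is easy to misread; a self-contained sketch of the mapping:

# False (default)            -> close_server_on_exit=True:  backend exits with the GUI window
# True (--no-gui-auto-close) -> close_server_on_exit=False: backend keeps serving
for no_gui_auto_close in (False, True):
    print(no_gui_auto_close, "->", not no_gui_auto_close)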

lama_cleaner/web_config.py Normal file

@@ -0,0 +1,118 @@
import json
import os
from datetime import datetime

import gradio as gr
from loguru import logger
from pydantic import BaseModel

from lama_cleaner.const import AVAILABLE_MODELS, AVAILABLE_DEVICES, CPU_OFFLOAD_HELP, NO_HALF_HELP, DISABLE_NSFW_HELP, \
    SD_CPU_TEXTENCODER_HELP, LOCAL_FILES_ONLY_HELP, ENABLE_XFORMERS_HELP, MODEL_DIR_HELP, OUTPUT_DIR_HELP, INPUT_HELP, \
    GUI_HELP, DEFAULT_MODEL, DEFAULT_DEVICE, NO_GUI_AUTO_CLOSE_HELP, DEFAULT_MODEL_DIR

_config_file = None


class Config(BaseModel):
    host: str = "127.0.0.1"
    port: int = 8080
    model: str = DEFAULT_MODEL
    device: str = DEFAULT_DEVICE
    gui: bool = False
    no_gui_auto_close: bool = False
    no_half: bool = False
    cpu_offload: bool = False
    disable_nsfw: bool = False
    sd_cpu_textencoder: bool = False
    enable_xformers: bool = False
    local_files_only: bool = False
    model_dir: str = DEFAULT_MODEL_DIR
    input: str = None
    output_dir: str = None


def load_config(installer_config: str):
    if os.path.exists(installer_config):
        with open(installer_config, "r", encoding="utf-8") as f:
            return Config(**json.load(f))
    else:
        return Config()


def save_config(
    host, port, model, device, gui, no_gui_auto_close, no_half, cpu_offload,
    disable_nsfw, sd_cpu_textencoder, enable_xformers, local_files_only,
    model_dir, input, output_dir
):
    config = Config(**locals())
    print(config)
    if config.input and not os.path.exists(config.input):
        return "[Error] Input file or directory does not exist"

    current_time = datetime.now().strftime("%H:%M:%S")
    msg = f"[{current_time}] Successfully saved config to: {os.path.abspath(_config_file)}"
    logger.info(msg)
    try:
        with open(_config_file, "w", encoding="utf-8") as f:
            json.dump(config.dict(), f, indent=4, ensure_ascii=False)
    except Exception as e:
        return f"Save failed: {str(e)}"
    return msg


def close_server(*args):
    # TODO: make closing both the browser and the server work
    import os
    import signal
    pid = os.getpid()
    os.kill(pid, signal.SIGUSR1)


def main(config_file: str):
    global _config_file
    _config_file = config_file
    init_config = load_config(config_file)

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=1):
                save_btn = gr.Button(value="Save configurations")
                message = gr.HTML()
            # with gr.Column(scale=0, min_width=100):
            #     exit_btn = gr.Button(value="Close")
            #     exit_btn.click(close_server)
        with gr.Row():
            host = gr.Textbox(init_config.host, label="Host")
            port = gr.Number(init_config.port, label="Port", precision=0)
        with gr.Row():
            model = gr.Radio(AVAILABLE_MODELS, label="Model", value=init_config.model)
            device = gr.Radio(AVAILABLE_DEVICES, label="Device", value=init_config.device)
        gui = gr.Checkbox(init_config.gui, label=f"{GUI_HELP}")
        no_gui_auto_close = gr.Checkbox(init_config.no_gui_auto_close, label=f"{NO_GUI_AUTO_CLOSE_HELP}")
        no_half = gr.Checkbox(init_config.no_half, label=f"{NO_HALF_HELP}")
        cpu_offload = gr.Checkbox(init_config.cpu_offload, label=f"{CPU_OFFLOAD_HELP}")
        disable_nsfw = gr.Checkbox(init_config.disable_nsfw, label=f"{DISABLE_NSFW_HELP}")
        sd_cpu_textencoder = gr.Checkbox(init_config.sd_cpu_textencoder, label=f"{SD_CPU_TEXTENCODER_HELP}")
        enable_xformers = gr.Checkbox(init_config.enable_xformers, label=f"{ENABLE_XFORMERS_HELP}")
        local_files_only = gr.Checkbox(init_config.local_files_only, label=f"{LOCAL_FILES_ONLY_HELP}")
        model_dir = gr.Textbox(init_config.model_dir, label=f"{MODEL_DIR_HELP}")
        input = gr.Textbox(init_config.input, label=f"Input file or directory. {INPUT_HELP}")
        output_dir = gr.Textbox(init_config.output_dir, label=f"Output directory. {OUTPUT_DIR_HELP}")

        save_btn.click(save_config, [
            host,
            port,
            model,
            device,
            gui,
            no_gui_auto_close,
            no_half,
            cpu_offload,
            disable_nsfw,
            sd_cpu_textencoder,
            enable_xformers,
            local_files_only,
            model_dir,
            input,
            output_dir,
        ], message)

    demo.launch(inbrowser=True, show_api=False)
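The saved file is simply Config(...).dict() serialized as JSON, so save_config and load_config round-trip; a minimal sketch (field values illustrative):

import json

from lama_cleaner.web_config import Config

cfg = Config(model="lama", device="cpu", gui=True)
text = json.dumps(cfg.dict(), indent=4, ensure_ascii=False)  # what save_config writes
restored = Config(**json.loads(text))                        # what load_config does
assert restored == cfg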

View File

@@ -5,6 +5,7 @@ flask==1.1.4
 flaskwebgui==0.3.5
 tqdm
 pydantic
+rich
 loguru
 pytest
 yacs
@@ -12,4 +13,5 @@ markupsafe==2.0.1
 scikit-image==0.19.3
 diffusers[torch]==0.10.2
 transformers>=4.25.1
-watchdog==2.2.1
+watchdog==2.2.1
+gradio

View File

@@ -1,13 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
echo `pwd`
source ./installer/bin/activate
conda-unpack
pip3 install -U lama-cleaner
invoke config --disable-device-choice

View File

@@ -1,11 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
echo `pwd`
source ./installer/bin/activate
conda-unpack
invoke start

View File

@@ -1,127 +0,0 @@
import os
import json
from enum import Enum
import socket
import logging
from contextlib import closing

from invoke import task
from rich import print
from rich.prompt import IntPrompt, Prompt, Confirm
from rich.logging import RichHandler

FORMAT = "%(message)s"
logging.basicConfig(
    level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
log = logging.getLogger("lama-cleaner")


def find_free_port() -> int:
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(("", 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]


CONFIG_PATH = "config.json"


class MODEL(str, Enum):
    SD15 = "sd1.5"
    LAMA = "lama"
    PAINT_BY_EXAMPLE = "paint_by_example"


class DEVICE(str, Enum):
    CUDA = "cuda"
    CPU = "cpu"


@task
def info(c):
    print("Environment information".center(60, "-"))
    try:
        c.run("git --version")
        c.run("conda --version")
        c.run("which python")
        c.run("python --version")
        c.run("which pip")
        c.run("pip --version")
        c.run('pip list | grep "torch\|lama\|diffusers\|opencv\|cuda\|xformers\|accelerate"')
    except:
        pass
    print("-" * 60)


@task(pre=[info])
def config(c, disable_device_choice=False):
    model = Prompt.ask(
        "Choice model", choices=[MODEL.SD15, MODEL.LAMA, MODEL.PAINT_BY_EXAMPLE], default=MODEL.SD15
    )
    if disable_device_choice:
        device = DEVICE.CPU
    else:
        device = Prompt.ask(
            "Choice device", choices=[DEVICE.CUDA, DEVICE.CPU], default=DEVICE.CUDA
        )
    if device == DEVICE.CUDA:
        import torch
        if not torch.cuda.is_available():
            log.warning(
                "Did not find CUDA device on your computer, fallback to cpu"
            )
            device = DEVICE.CPU
    desktop = Confirm.ask("Start as desktop app?", default=True)

    configs = {
        "model": model,
        "device": device,
        "desktop": desktop,
    }
    log.info(f"Save config to {CONFIG_PATH}")
    with open(CONFIG_PATH, "w", encoding="utf-8") as f:
        json.dump(configs, f, indent=2, ensure_ascii=False)
    Confirm.ask("Config finish, you can close this window")


@task(pre=[info])
def start(c):
    if not os.path.exists(CONFIG_PATH):
        Confirm.ask("Config file not exists, please run config scritp first")
        exit()
    log.info(f"Load config from {CONFIG_PATH}")
    with open(CONFIG_PATH, "r", encoding="utf-8") as f:
        configs = json.load(f)
    model = configs["model"]
    device = configs["device"]
    desktop = configs["desktop"]
    port = find_free_port()
    log.info(f"Using random port: {port}")

    commandline_args = [
        "--model", model,
        "--device", device,
        "--port", port,
    ]
    if desktop:
        commandline_args.extend(["--gui", "--gui-size", "1400", "900"])
    model_dir = os.environ.get("MODEL_DIR", "")
    if model_dir:
        commandline_args.extend(["--model-dir", model_dir])
    commandline_args = " ".join([str(it) for it in commandline_args])

    env_commandline_args = os.environ.get("COMMANDLINE_ARGS", "")
    c.run(
        f"lama-cleaner {env_commandline_args} {commandline_args}"
    )

View File

@@ -11,6 +11,6 @@ set PATH=C:\Windows\System32;%PATH%
 @call pip3 install -U lama-cleaner
-@call invoke config
+@call lama-cleaner --config-installer --installer-config %0\..\installer_config.json
 PAUSE

View File

@@ -4,8 +4,6 @@ set PATH=C:\Windows\System32;%PATH%
 @call installer\Scripts\activate.bat
-set MODEL_DIR=
-set COMMANDLINE_ARGS=
-@call invoke start
+@call lama-cleaner --load-installer-config --installer-config %0\..\installer_config.json
 PAUSE
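For readers unfamiliar with the %0\..\installer_config.json idiom in the two .bat files: %0 is the script's own path, so the config file is resolved next to the launcher rather than in the current directory. A rough Python equivalent (hypothetical helper, not part of the commit):

from pathlib import Path

# The installer config sits beside the launcher script, regardless of cwd.
config_path = Path(__file__).resolve().parent / "installer_config.json"
print(config_path)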