update one click installer

This commit is contained in:
Qing 2022-10-24 18:32:35 +08:00
parent 476159bbe5
commit 1883d9ec4a
11 changed files with 188 additions and 35 deletions


@@ -86,21 +86,23 @@ get an access token from here [huggingface access token](https://huggingface.co/
If you prefer to use docker, you can check out [docker](#docker)
If you have no idea what docker or pip are, please check [One Click Installer](./scripts/README.md)
Available command line arguments:
| Name | Description | Default |
| -------------------- | --------------------------------------------------------------------------------------------------------------------------- | -------- |
| --model | lama/ldm/zits/mat/fcf/sd1.5 See details in [Inpaint Model](#inpainting-model) | lama |
| --hf_access_token | stable-diffusion needs a [huggingface access token](https://huggingface.co/docs/hub/security-tokens) to download the model | |
| --sd-run-local | Once the model has been downloaded, you can pass this arg and remove `--hf_access_token` | |
| --sd-disable-nsfw | Disable the stable-diffusion NSFW checker | |
| --sd-cpu-textencoder | Always run the stable-diffusion TextEncoder model on CPU | |
| --device | cuda or cpu | cuda |
| --port | Port for the backend flask web server | 8080 |
| --gui | Launch lama-cleaner as a desktop application | |
| --gui_size | Set the window size for the application | 1200 900 |
| --input | Path to the image you want to load by default | None |
| --debug | Enable debug mode for the flask web server | |
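For example, a minimal launch using only the flags documented above; the web UI should then be reachable on the configured port (http://localhost:8080 here):
```
lama-cleaner --model lama --device cpu --port 8080
```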
## Inpainting Model
@@ -228,7 +230,3 @@ gpu & cpu
```
docker build -f ./docker/GPUDockerfile -t lamacleaner .
```
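The docs stop at the build step. A hypothetical run command, assuming the image leaves the default entrypoint (so the lama-cleaner CLI is invoked explicitly) and that the NVIDIA container toolkit is installed for `--gpus`, might look like:
```
docker run --gpus all -p 8080:8080 lamacleaner lama-cleaner --model lama --device cuda --port 8080
```
Depending on which address the server binds inside the container, extra network configuration may be needed.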
## One Click Installer
TODO

2
scripts/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
lama-cleaner/
*.zip

15
scripts/README.md Normal file

@@ -0,0 +1,15 @@
# Lama Cleaner One Click Installer
## macOS
1. Download [lama-cleaner.zip]()
1. Unpack lama-cleaner.zip
1. Double click `mac_config.command` and follow the guide in the terminal to choose a model and set other configs.
- lama model: State-of-the-art image inpainting AI model, useful for removing any unwanted objects, defects, or people from your pictures.
- sd1.5 model: Stable Diffusion model, text-driven image editing. To use this model you need to [accept the terms to access](https://huggingface.co/runwayml/stable-diffusion-inpainting) and
get a [huggingface access token](https://huggingface.co/docs/hub/security-tokens).
1. Double click `mac_start.command` to start the server.
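The `.command` files are plain shell scripts, so the same two steps also work from a terminal (assuming you are inside the unpacked folder):
```
./mac_config.command   # choose a model and write the config
./mac_start.command    # start the server
```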
## Windows
coming soon...


@@ -4,4 +4,7 @@ channels:
- conda-forge
dependencies:
- conda
- git
- git-lfs
- invoke
- rich
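The pack scripts below build the self-contained environment from this file; the core step (the same command the shell script runs) is:
```
conda env create --prefix installer -f environment.yaml
```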

22
scripts/pack.bat Normal file

@@ -0,0 +1,22 @@
@echo off
REM Windows version of pack.sh; assumes conda is already on PATH
SET PYTHONNOUSERSITE=1
SET BUILD_DIST=lama-cleaner
SET BUILD_ENV=installer
SET USER_SCRIPTS=user_scripts
echo Creating a distributable package..
call conda install -c conda-forge -y conda-pack
call conda env create --prefix %BUILD_ENV% -f environment.yaml
call conda activate %CD%\%BUILD_ENV%
call conda pack --n-threads -1 --prefix %BUILD_ENV% --format tar
mkdir %BUILD_DIST%\%BUILD_ENV%
echo Copy user scripts file %USER_SCRIPTS%
COPY %USER_SCRIPTS%\* %BUILD_DIST%
cd %BUILD_DIST%
tar -xf ..\%BUILD_ENV%.tar -C %BUILD_ENV%
cd ..
RMDIR /S /Q %BUILD_ENV%
DEL %BUILD_ENV%.tar
echo zip %BUILD_DIST%.zip
REM bsdtar (Windows 10+) picks the zip format from the .zip extension with -a
tar -a -c -f %BUILD_DIST%.zip %BUILD_DIST%


@@ -1,11 +1,14 @@
#!/bin/bash
# Prepare basic python environment
set -e
# Ensure the user's python packages are not used
export PYTHONNOUSERSITE=1
BUILD_DIST=lama-cleaner
BUILD_ENV=installer
USER_SCRIPTS=user_scripts
echo "Creating a distributable package.."
@@ -20,9 +23,8 @@ conda pack --n-threads -1 --prefix $BUILD_ENV --format tar
mkdir -p ${BUILD_DIST}/$BUILD_ENV
echo "Copy project file.."
chmod u+x start.sh
cp start.sh $BUILD_DIST
echo "Copy user scripts file ${USER_SCRIPTS}"
cp ${USER_SCRIPTS}/* $BUILD_DIST
cd $BUILD_DIST
tar -xf ../${BUILD_ENV}.tar -C $BUILD_ENV
@@ -31,5 +33,6 @@ cd ..
rm -rf $BUILD_ENV
rm ${BUILD_ENV}.tar
zip -r $BUILD_DIST.zip $BUILD_DIST
echo "zip ${BUILD_DIST}.zip"
zip -q -r $BUILD_DIST.zip $BUILD_DIST
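Running the script (presumably `scripts/pack.sh`, the shell twin of `pack.bat` above) should leave a self-contained archive next to it; a sketch of the expected result:
```
./pack.sh
# produces lama-cleaner.zip: the user scripts plus the packed conda environment
```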


@@ -1,14 +0,0 @@
#!/bin/bash
source installer/bin/activate
conda-unpack
conda --version
git --version
echo Using `which pip3`
pip3 install lama-cleaner
# TODO: add model input prompt
lama-cleaner --device cpu --model lama


@@ -0,0 +1,13 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
echo `pwd`
source ./installer/bin/activate
conda-unpack
pip3 install -U lama-cleaner
invoke config --disable-device-choice


@@ -0,0 +1,11 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
echo `pwd`
source ./installer/bin/activate
conda-unpack
invoke start


@@ -0,0 +1,100 @@
import os
import json
from enum import Enum
import socket
import logging
from contextlib import closing
from invoke import task
from rich import print
from rich.prompt import IntPrompt, Prompt, Confirm
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(
    level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]
)
log = logging.getLogger("lama-cleaner")
def find_free_port() -> int:
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(('', 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]
CONFIG_PATH = "config.json"
class MODEL(str, Enum):
    SD15 = "sd1.5"
    LAMA = "lama"
class DEVICE(str, Enum):
    CUDA = "cuda"
    CPU = "cpu"
@task
def info(c):
    print("Environment information".center(60, "-"))
    try:
        c.run("git --version")
        c.run("conda --version")
        c.run("which python")
        c.run("python --version")
        c.run("which pip")
        c.run("pip --version")
        c.run("pip list | grep lama")
    except Exception:
        pass
    print("-" * 60)
@task(pre=[info])
def config(c, disable_device_choice=False):
    # TODO: prompt for model, device, port and host selection
    # If the sd model is chosen, prompt to accept the terms and enter a huggingface token
    model = Prompt.ask("Choose model", choices=[MODEL.SD15, MODEL.LAMA], default=MODEL.SD15)
    hf_access_token = ""
    if model == MODEL.SD15:
        while True:
            hf_access_token = Prompt.ask("Huggingface access token (https://huggingface.co/docs/hub/security-tokens)")
            if hf_access_token == "":
                log.warning("Access token is required to download the model")
            else:
                break
    if disable_device_choice:
        device = DEVICE.CPU
    else:
        device = Prompt.ask("Choose device", choices=[DEVICE.CUDA, DEVICE.CPU], default=DEVICE.CUDA)
        if device == DEVICE.CUDA:
            import torch
            if not torch.cuda.is_available():
                log.warning("No CUDA device found on your computer, falling back to cpu")
                device = DEVICE.CPU
    configs = {"model": model, "device": device, "hf_access_token": hf_access_token}
    log.info(f"Save config to {CONFIG_PATH}")
    with open(CONFIG_PATH, 'w', encoding='utf-8') as f:
        json.dump(configs, f, indent=2, ensure_ascii=False)
    log.info("Config finished, you can close this window.")
@task(pre=[info])
def start(c):
    if not os.path.exists(CONFIG_PATH):
        log.info("Config file does not exist, please run the config script first")
        exit()
    log.info(f"Load config from {CONFIG_PATH}")
    with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
        configs = json.load(f)
    model = configs['model']
    device = configs['device']
    hf_access_token = configs['hf_access_token']
    port = find_free_port()
    log.info(f"Using random port: {port}")
    c.run(f"lama-cleaner --model {model} --device {device} --hf_access_token={hf_access_token} --port {port} --gui --gui-size 1400 900")