commit
112a339baf
36
Dockerfile
Normal file
36
Dockerfile
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
#
# Lama Cleaner Dockerfile
# @author Loreto Parisi (loretoparisi at gmail dot com)
#

FROM python:3.7.4-slim-buster

# Modern key=value LABEL form (bare "LABEL maintainer foo" is deprecated syntax).
LABEL maintainer="Loreto Parisi <loretoparisi@gmail.com>"

# Absolute path: a relative WORKDIR silently resolves against the base image's
# current directory; the README's docker run commands mount volumes at /app.
WORKDIR /app

# System dependencies for OpenCV/ffmpeg rendering plus node tooling.
# Clean the apt lists in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    software-properties-common \
    libsm6 libxext6 ffmpeg libfontconfig1 libxrender1 libgl1-mesa-glx \
    curl \
    npm \
    && rm -rf /var/lib/apt/lists/*

# python requirements — copied before the full source tree so this
# (slow) layer is cached independently of unrelated source edits.
COPY requirements.txt /etc/tmp/requirements.txt
RUN pip install -r /etc/tmp/requirements.txt

# nodejs (upgrade the distro npm/node to LTS via `n`)
RUN npm install n -g && \
    n lts

# yarn
RUN npm install -g yarn

# application source
COPY . .

# webapp
RUN cd lama_cleaner/app/ && \
    yarn && \
    yarn build

EXPOSE 8080

CMD ["bash"]
|
21
README.md
21
README.md
@ -17,3 +17,24 @@ You can experience their great online services [here](https://cleanup.pictures/)
|
|||||||
- Install dependencies:`cd lama_cleaner/app/ && yarn`
|
- Install dependencies:`cd lama_cleaner/app/ && yarn`
|
||||||
- Start development server: `yarn dev`
|
- Start development server: `yarn dev`
|
||||||
- Build: `yarn build`
|
- Build: `yarn build`
|
||||||
|
|
||||||
|
## Docker
|
||||||
|
Run within a Docker container. Set the `cache_dir` environment variable to the path of the models directory.
|
||||||
|
Optionally add a `-d` option to the `docker run` command below to run as a daemon.
|
||||||
|
|
||||||
|
### Build Docker image
|
||||||
|
```
|
||||||
|
docker build -f Dockerfile -t lamacleaner .
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run Docker (cpu)
|
||||||
|
```
|
||||||
|
docker run -p 8080:8080 -e cache_dir=/app/models -v $(pwd)/models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cpu --port=8080
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run Docker (gpu)
|
||||||
|
```
|
||||||
|
docker run --gpus all -p 8080:8080 -e cache_dir=/app/models -v $(pwd)/models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cuda --port=8080
|
||||||
|
```
|
||||||
|
|
||||||
|
Then open [http://localhost:8080](http://localhost:8080)
|
@ -17,8 +17,9 @@ def download_model(url=LAMA_MODEL_URL):
|
|||||||
parts = urlparse(url)
|
parts = urlparse(url)
|
||||||
hub_dir = get_dir()
|
hub_dir = get_dir()
|
||||||
model_dir = os.path.join(hub_dir, "checkpoints")
|
model_dir = os.path.join(hub_dir, "checkpoints")
|
||||||
|
if not os.path.isdir(model_dir):
|
||||||
|
os.makedirs(os.path.join(model_dir, "hub", "checkpoints"))
|
||||||
filename = os.path.basename(parts.path)
|
filename = os.path.basename(parts.path)
|
||||||
|
|
||||||
cached_file = os.path.join(model_dir, filename)
|
cached_file = os.path.join(model_dir, filename)
|
||||||
if not os.path.exists(cached_file):
|
if not os.path.exists(cached_file):
|
||||||
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
|
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
|
||||||
|
7
main.py
7
main.py
@ -17,12 +17,17 @@ from lama_cleaner.helper import (
|
|||||||
pad_img_to_modulo,
|
pad_img_to_modulo,
|
||||||
)
|
)
|
||||||
|
|
||||||
import multiprocessing

# Size the math-library thread pools to the actual machine instead of a
# hard-coded "4"; must be set before numpy/torch are imported to take effect.
NUM_THREADS = str(multiprocessing.cpu_count())

os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS

# Redirect torch's model cache to a user-supplied directory (the Docker image
# sets cache_dir to a mounted volume). Use .get(): subscripting os.environ
# raises KeyError when the variable is not set at all, which would crash every
# non-Docker invocation.
if os.environ.get("cache_dir"):
    os.environ["TORCH_HOME"] = os.environ["cache_dir"]

BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "./lama_cleaner/app/build")
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user