Added Dockerfile

This commit is contained in:
Loreto Parisi 2021-11-15 20:11:46 +01:00
parent 4e027f81e6
commit e839a36503
4 changed files with 57 additions and 2 deletions

36
Dockerfile Normal file
View File

@ -0,0 +1,36 @@
#
# Lama Cleaner Dockerfile
# @author Loreto Parisi (loretoparisi at gmail dot com)
#
FROM python:3.7.4-slim-buster

# MAINTAINER-style label is deprecated; use key=value LABEL form.
LABEL maintainer="Loreto Parisi <loretoparisi@gmail.com>"

# WORKDIR must be an absolute path (relative WORKDIR resolves against the
# previous one and is flagged by hadolint DL3000).
WORKDIR /app

# OS packages: update+install in a single layer, skip recommended extras,
# sort packages for diffability, and purge the apt lists in the same layer
# so the package index never bloats the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        ffmpeg \
        libfontconfig1 \
        libgl1-mesa-glx \
        libsm6 \
        libxext6 \
        libxrender1 \
        npm \
        software-properties-common \
    && rm -rf /var/lib/apt/lists/*

# python requirements: copy only the manifest first so this layer stays
# cached until requirements.txt itself changes (source edits no longer
# trigger a full reinstall); --no-cache-dir keeps pip's cache out of the image.
COPY requirements.txt /etc/tmp/requirements.txt
RUN pip install --no-cache-dir -r /etc/tmp/requirements.txt

# nodejs: install the current LTS release via the "n" version manager.
RUN npm install -g n && \
    n lts

# yarn
RUN npm install -g yarn

# application source, copied after dependencies for better layer caching
COPY . .

# webapp: build the frontend bundle (WORKDIR instead of `RUN cd …`, DL3003).
WORKDIR /app/lama_cleaner/app
RUN yarn && \
    yarn build
WORKDIR /app

# Documentation only — the server port published via `docker run -p 8080:8080`.
EXPOSE 8080
CMD ["bash"]

View File

@ -17,3 +17,17 @@ You can experience their great online services [here](https://cleanup.pictures/)
- Install dependencies: `cd lama_cleaner/app/ && yarn`
- Start development server: `yarn dev`
- Build: `yarn build`
## Docker (cpu)
```
docker build -f Dockerfile -t lamacleaner .
docker run -p 8080:8080 -e cache_dir=/app/models -v models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cpu --port=8080
```
## Docker (gpu)
```
docker build -f Dockerfile -t lamacleaner .
docker run --gpus all -p 8080:8080 -e cache_dir=/app/models -v models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=gpu --port=8080
```
Then open [http://localhost:8080](http://localhost:8080)

View File

@ -17,8 +17,9 @@ def download_model(url=LAMA_MODEL_URL):
    parts = urlparse(url)
    hub_dir = get_dir()
    model_dir = os.path.join(hub_dir, "checkpoints")
    if not os.path.isdir(model_dir):
        os.makedirs(os.path.join(model_dir, "hub", "checkpoints"))
    filename = os.path.basename(parts.path)
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))

View File

@ -17,12 +17,16 @@ from lama_cleaner.helper import (
    pad_img_to_modulo,
)
import multiprocessing
NUM_THREADS = str(multiprocessing.cpu_count())
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
os.environ["TORCH_HOME"] = os.environ["cache_dir"]
BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "./lama_cleaner/app/build")