fix cache_dir in main.py
parent 112a339baf · commit 10fb00311f
README.md (10 changed lines)
@@ -19,22 +19,26 @@ You can experience their great online services [here](https://cleanup.pictures/)
 - Build: `yarn build`

 ## Docker
-Run within a Docker container. Set the `cache_dir` to models location path.
+Run within a Docker container. Set the `CACHE_DIR` to models location path.
 Optionally add a `-d` option to the `docker run` command below to run as a daemon.

 ### Build Docker image

 ```
 docker build -f Dockerfile -t lamacleaner .
 ```

 ### Run Docker (cpu)

 ```
-docker run -p 8080:8080 -e cache_dir=/app/models -v $(pwd)/models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cpu --port=8080
+docker run -p 8080:8080 -e CACHE_DIR=/app/models -v $(pwd)/models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cpu --port=8080
 ```

 ### Run Docker (gpu)

 ```
-docker run --gpus all -p 8080:8080 -e cache_dir=/app/models -v $(pwd)/models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cuda --port=8080
+docker run --gpus all -p 8080:8080 -e CACHE_DIR=/app/models -v $(pwd)/models:/app/models -v $(pwd):/app --rm lamacleaner python3 main.py --device=cuda --port=8080
 ```

 Then open [http://localhost:8080](http://localhost:8080)
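The `-e CACHE_DIR=/app/models` flag only takes effect because `main.py` copies the variable into `TORCH_HOME`, the environment variable PyTorch consults when deciding where `torch.hub` caches downloaded model weights. Below is a minimal sketch for checking, inside the container, that the mounted volume is the cache location that will actually be used; the standalone script and its name are illustrative and not part of this repository.

```python
# check_cache_dir.py -- illustrative helper, not part of lama-cleaner.
# Run inside the container to verify that CACHE_DIR (passed via `docker run -e`)
# is where torch.hub will store downloaded model weights.
import os

import torch

cache_dir = os.environ.get("CACHE_DIR")
if cache_dir:
    # Mirrors what main.py does after this commit.
    os.environ["TORCH_HOME"] = cache_dir

# torch.hub.get_dir() resolves to $TORCH_HOME/hub when TORCH_HOME is set.
print("CACHE_DIR       :", cache_dir)
print("torch.hub cache :", torch.hub.get_dir())
```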
main.py (4 changed lines)
@@ -26,8 +26,8 @@ os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
 os.environ["MKL_NUM_THREADS"] = NUM_THREADS
 os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
 os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
-if os.environ["cache_dir"]:
-    os.environ["TORCH_HOME"] = os.environ["cache_dir"]
+if os.environ.get("CACHE_DIR"):
+    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]

 BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "./lama_cleaner/app/build")

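The functional part of the fix is the switch from `os.environ["cache_dir"]` to `os.environ.get("CACHE_DIR")`: indexing `os.environ` raises `KeyError` when the variable is absent, while `.get()` returns `None`, so the `TORCH_HOME` override is simply skipped when no cache directory is configured. A small standalone sketch of the two behaviours, for illustration only:

```python
import os

# Old pattern: a plain lookup crashes at import time when the variable is
# not set, e.g. when main.py is run outside Docker without `-e cache_dir=...`.
try:
    _ = os.environ["cache_dir"]
except KeyError:
    print("cache_dir not set -> KeyError with the old code")

# New pattern: .get() returns None for a missing variable, so the override
# is skipped and PyTorch keeps its default cache location.
if os.environ.get("CACHE_DIR"):
    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
else:
    print("CACHE_DIR not set -> keeping PyTorch's default cache location")
```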