diff --git a/README.md b/README.md
index 013e700..04fffca 100644
--- a/README.md
+++ b/README.md
@@ -57,16 +57,19 @@ lama-cleaner --model=lama --device=cpu --port=8080
 
 Available arguments:
 
-| Name              | Description                                                                                               | Default  |
-| ----------------- | --------------------------------------------------------------------------------------------------------- | -------- |
-| --model           | lama/ldm/zits/mat/fcf/sd. See details in [Inpaint Model](#inpainting-model)                               | lama     |
-| --hf_access_token | stable-diffusion(sd) model need huggingface access token https://huggingface.co/docs/hub/security-tokens |          |
-| --device          | cuda or cpu                                                                                               | cuda     |
-| --port            | Port for backend flask web server                                                                         | 8080     |
-| --gui             | Launch lama-cleaner as a desktop application                                                              |          |
-| --gui_size        | Set the window size for the application                                                                   | 1200 900 |
-| --input           | Path to image you want to load by default                                                                 | None     |
-| --debug           | Enable debug mode for flask web server                                                                    |          |
+| Name                 | Description                                                                                                                          | Default  |
+| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -------- |
+| --model              | lama/ldm/zits/mat/fcf/sd1.4. See details in [Inpaint Model](#inpainting-model)                                                       | lama     |
+| --hf_access_token    | stable-diffusion(sd) model needs a [huggingface access token](https://huggingface.co/docs/hub/security-tokens) to download the model |          |
+| --sd-run-local       | Once the model has been downloaded, you can pass this arg and remove `--hf_access_token`                                             |          |
+| --sd-disable-nsfw    | Disable the stable-diffusion NSFW checker                                                                                            |          |
+| --sd-cpu-textencoder | Always run the stable-diffusion TextEncoder model on CPU                                                                             |          |
+| --device             | cuda or cpu                                                                                                                          | cuda     |
+| --port               | Port for backend flask web server                                                                                                    | 8080     |
+| --gui                | Launch lama-cleaner as a desktop application                                                                                         |          |
+| --gui_size           | Set the window size for the application                                                                                              | 1200 900 |
+| --input              | Path to image you want to load by default                                                                                            | None     |
+| --debug              | Enable debug mode for flask web server                                                                                               |          |
 
 ## Inpainting Model
 
diff --git a/lama_cleaner/model/sd.py b/lama_cleaner/model/sd.py
index 8aaeab8..a1cd881 100644
--- a/lama_cleaner/model/sd.py
+++ b/lama_cleaner/model/sd.py
@@ -70,7 +70,7 @@ class SD(InpaintModel):
     def init_model(self, device: torch.device, **kwargs):
         from .sd_pipeline import StableDiffusionInpaintPipeline
 
-        model_kwargs = {}
+        model_kwargs = {"local_files_only": kwargs['sd_run_local']}
         if kwargs['sd_disable_nsfw']:
             logger.info("Disable Stable Diffusion Model NSFW checker")
             model_kwargs.update(dict(
diff --git a/lama_cleaner/parse_args.py b/lama_cleaner/parse_args.py
index b876dde..e96bb64 100644
--- a/lama_cleaner/parse_args.py
+++ b/lama_cleaner/parse_args.py
@@ -20,13 +20,18 @@ def parse_args():
     parser.add_argument(
         "--sd-disable-nsfw",
         action="store_true",
-        help="Disable Stable Diffusion nsfw checker",
+        help="Disable Stable Diffusion NSFW checker",
     )
     parser.add_argument(
         "--sd-cpu-textencoder",
         action="store_true",
         help="Always run Stable Diffusion TextEncoder model on CPU",
     )
+    parser.add_argument(
+        "--sd-run-local",
+        action="store_true",
+        help="After the Stable Diffusion model has been downloaded for the first time, you can add this arg and remove --hf_access_token",
+    )
     parser.add_argument("--device", default="cuda", type=str, choices=["cuda", "cpu"])
     parser.add_argument("--gui", action="store_true", help="Launch as desktop app")
     parser.add_argument(
@@ -48,7 +53,7 @@ def parse_args():
     if imghdr.what(args.input) is None:
         parser.error(f"invalid --input: {args.input} is not a valid image file")
 
-    if args.model.startswith("sd"):
+    if args.model.startswith("sd") and not args.sd_run_local:
         if not args.hf_access_token.startswith("hf_"):
             parser.error(
                 f"sd(stable-diffusion) model requires huggingface access token. Check how to get token from: https://huggingface.co/docs/hub/security-tokens"
diff --git a/lama_cleaner/server.py b/lama_cleaner/server.py
index 8fcd332..c72c782 100644
--- a/lama_cleaner/server.py
+++ b/lama_cleaner/server.py
@@ -220,6 +220,7 @@ def main(args):
         hf_access_token=args.hf_access_token,
         sd_disable_nsfw=args.sd_disable_nsfw,
         sd_cpu_textencoder=args.sd_cpu_textencoder,
+        sd_run_local=args.sd_run_local,
         callbacks=[diffuser_callback],
     )
 
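
Not part of the patch itself, but a minimal usage sketch of the new flag, assuming `sd1.4` is the value accepted by `--model` (as listed in the README table above) and with `hf_xxx` standing in for a real token:

```bash
# First run: download the Stable Diffusion weights; a Hugging Face access token is required
lama-cleaner --model=sd1.4 --hf_access_token=hf_xxx

# Later runs: load the weights from the local Hugging Face cache; no token needed
lama-cleaner --model=sd1.4 --sd-run-local
```

`--sd-run-local` puts `local_files_only=True` into `model_kwargs` (presumably forwarded to `StableDiffusionInpaintPipeline.from_pretrained`), which is the standard Hugging Face switch for resolving weights from the local cache without network access, and it also skips the `hf_` token check in `parse_args.py`.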