Merge branch 'diffusers_0.9'
commit e15146dad4

@@ -1,7 +1,7 @@
 {
   "files": {
     "main.css": "/static/css/main.bb67386a.chunk.css",
-    "main.js": "/static/js/main.3c80dc94.chunk.js",
+    "main.js": "/static/js/main.5cf6948e.chunk.js",
     "runtime-main.js": "/static/js/runtime-main.5e86ac81.js",
     "static/js/2.ee9dcc6c.chunk.js": "/static/js/2.ee9dcc6c.chunk.js",
     "index.html": "/index.html",
@@ -12,6 +12,6 @@
     "static/js/runtime-main.5e86ac81.js",
     "static/js/2.ee9dcc6c.chunk.js",
     "static/css/main.bb67386a.chunk.css",
-    "static/js/main.3c80dc94.chunk.js"
+    "static/js/main.5cf6948e.chunk.js"
   ]
 }

@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate"/><meta http-equiv="Pragma" content="no-cache"/><meta http-equiv="Expires" content="0"/><meta charset="utf-8"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=0"/><meta name="theme-color" content="#ffffff"/><title>lama-cleaner - Image inpainting powered by SOTA AI model</title><link href="/static/css/main.bb67386a.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,l,a=r[0],f=r[1],i=r[2],p=0,s=[];p<a.length;p++)l=a[p],Object.prototype.hasOwnProperty.call(o,l)&&o[l]&&s.push(o[l][0]),o[l]=0;for(n in f)Object.prototype.hasOwnProperty.call(f,n)&&(e[n]=f[n]);for(c&&c(r);s.length;)s.shift()();return u.push.apply(u,i||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var f=t[a];0!==o[f]&&(n=!1)}n&&(u.splice(r--,1),e=l(l.s=t[0]))}return e}var n={},o={1:0},u=[];function l(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,l),t.l=!0,t.exports}l.m=e,l.c=n,l.d=function(e,r,t){l.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},l.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},l.t=function(e,r){if(1&r&&(e=l(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(l.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)l.d(t,n,function(r){return e[r]}.bind(null,n));return t},l.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return l.d(r,"a",r),r},l.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},l.p="/";var a=this["webpackJsonplama-cleaner"]=this["webpackJsonplama-cleaner"]||[],f=a.push.bind(a);a.push=r,a=a.slice();for(var i=0;i<a.length;i++)r(a[i]);var c=f;t()}([])</script><script src="/static/js/2.ee9dcc6c.chunk.js"></script><script src="/static/js/main.3c80dc94.chunk.js"></script></body></html>
<!doctype html><html lang="en"><head><meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate"/><meta http-equiv="Pragma" content="no-cache"/><meta http-equiv="Expires" content="0"/><meta charset="utf-8"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=0"/><meta name="theme-color" content="#ffffff"/><title>lama-cleaner - Image inpainting powered by SOTA AI model</title><link href="/static/css/main.bb67386a.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,l,a=r[0],f=r[1],i=r[2],p=0,s=[];p<a.length;p++)l=a[p],Object.prototype.hasOwnProperty.call(o,l)&&o[l]&&s.push(o[l][0]),o[l]=0;for(n in f)Object.prototype.hasOwnProperty.call(f,n)&&(e[n]=f[n]);for(c&&c(r);s.length;)s.shift()();return u.push.apply(u,i||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var f=t[a];0!==o[f]&&(n=!1)}n&&(u.splice(r--,1),e=l(l.s=t[0]))}return e}var n={},o={1:0},u=[];function l(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,l),t.l=!0,t.exports}l.m=e,l.c=n,l.d=function(e,r,t){l.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},l.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},l.t=function(e,r){if(1&r&&(e=l(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(l.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)l.d(t,n,function(r){return e[r]}.bind(null,n));return t},l.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return l.d(r,"a",r),r},l.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},l.p="/";var a=this["webpackJsonplama-cleaner"]=this["webpackJsonplama-cleaner"]||[],f=a.push.bind(a);a.push=r,a=a.slice();for(var i=0;i<a.length;i++)r(a[i]);var c=f;t()}([])</script><script src="/static/js/2.ee9dcc6c.chunk.js"></script><script src="/static/js/main.5cf6948e.chunk.js"></script></body></html>

lama_cleaner/app/build/static/js/main.5cf6948e.chunk.js (new file): diff suppressed because one or more lines are too long

@@ -191,6 +191,8 @@ function ModelSettingBlock() {
         return renderFCFModelDesc()
       case AIModel.SD15:
         return undefined
+      case AIModel.SD2:
+        return undefined
       case AIModel.Mange:
         return undefined
       case AIModel.CV2:
@@ -234,10 +236,16 @@ function ModelSettingBlock() {
         )
       case AIModel.SD15:
         return renderModelDesc(
-          'Stable Diffusion',
+          'Stable Diffusion 1.5',
           'https://ommer-lab.com/research/latent-diffusion-models/',
           'https://github.com/CompVis/stable-diffusion'
         )
+      case AIModel.SD2:
+        return renderModelDesc(
+          'Stable Diffusion 2',
+          'https://ommer-lab.com/research/latent-diffusion-models/',
+          'https://github.com/Stability-AI/stablediffusion'
+        )
       case AIModel.Mange:
         return renderModelDesc(
           'Manga Inpainting',

@@ -10,6 +10,7 @@ export enum AIModel {
   MAT = 'mat',
   FCF = 'fcf',
   SD15 = 'sd1.5',
+  SD2 = 'sd2',
   CV2 = 'cv2',
   Mange = 'manga',
 }
@@ -294,7 +295,14 @@ const defaultHDSettings: ModelsHDSettings = {
     hdStrategyResizeLimit: 768,
     hdStrategyCropTrigerSize: 512,
     hdStrategyCropMargin: 128,
-    enabled: true,
+    enabled: false,
   },
+  [AIModel.SD2]: {
+    hdStrategy: HDStrategy.ORIGINAL,
+    hdStrategyResizeLimit: 768,
+    hdStrategyCropTrigerSize: 512,
+    hdStrategyCropMargin: 128,
+    enabled: false,
+  },
   [AIModel.Mange]: {
     hdStrategy: HDStrategy.CROP,
@@ -318,6 +326,7 @@ export enum SDSampler {
   klms = 'k_lms',
   kEuler = 'k_euler',
   kEulerA = 'k_euler_a',
+  dpmPlusPlus = 'dpm++',
 }
 
 export enum SDMode {
@@ -422,7 +431,7 @@ export const isSDState = selector({
   key: 'isSD',
   get: ({ get }) => {
     const settings = get(settingState)
-    return settings.model === AIModel.SD15
+    return settings.model === AIModel.SD15 || settings.model === AIModel.SD2
   },
 })
 

@@ -5,7 +5,7 @@ import cv2
 import numpy as np
 import torch
 from diffusers import PNDMScheduler, DDIMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, \
-    EulerAncestralDiscreteScheduler
+    EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler
 from loguru import logger
 
 from lama_cleaner.model.base import InpaintModel
@@ -102,27 +102,20 @@ class SD(InpaintModel):
         # image = torch.from_numpy(image).unsqueeze(0).to(self.device)
         # mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
 
-        scheduler_kwargs = dict(
-            beta_schedule="scaled_linear",
-            beta_start=0.00085,
-            beta_end=0.012,
-            num_train_timesteps=1000,
-        )
+        scheduler_config = self.model.scheduler.config
 
         if config.sd_sampler == SDSampler.ddim:
-            scheduler = DDIMScheduler(
-                **scheduler_kwargs,
-                clip_sample=False,
-                set_alpha_to_one=False,
-            )
+            scheduler = DDIMScheduler.from_config(scheduler_config)
         elif config.sd_sampler == SDSampler.pndm:
-            scheduler = PNDMScheduler(**scheduler_kwargs, skip_prk_steps=True)
+            scheduler = PNDMScheduler.from_config(scheduler_config)
         elif config.sd_sampler == SDSampler.k_lms:
-            scheduler = LMSDiscreteScheduler(**scheduler_kwargs)
+            scheduler = LMSDiscreteScheduler.from_config(scheduler_config)
         elif config.sd_sampler == SDSampler.k_euler:
-            scheduler = EulerDiscreteScheduler(**scheduler_kwargs)
+            scheduler = EulerDiscreteScheduler.from_config(scheduler_config)
        elif config.sd_sampler == SDSampler.k_euler_a:
-            scheduler = EulerAncestralDiscreteScheduler(**scheduler_kwargs)
+            scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config)
+        elif config.sd_sampler == SDSampler.dpm_plus_plus:
+            scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config)
         else:
             raise ValueError(config.sd_sampler)
 
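
Note: a minimal sketch (not part of this commit) of the diffusers pattern the new code relies on,
where a sampler is rebuilt from the pipeline's existing scheduler config instead of hand-written
beta_* kwargs. The `pipe` variable below is an assumed, already-loaded StableDiffusion*Pipeline.

    from diffusers import DPMSolverMultistepScheduler

    # Swap the pipeline's sampler while keeping its original noise-schedule settings.
    scheduler_config = pipe.scheduler.config
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config)
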
@@ -138,13 +131,10 @@ class SD(InpaintModel):
             k = 2 * config.sd_mask_blur + 1
             mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]
 
-        _kwargs = {
-            self.image_key: PIL.Image.fromarray(image),
-        }
-
         img_h, img_w = image.shape[:2]
 
         output = self.model(
+            image=PIL.Image.fromarray(image),
             prompt=config.prompt,
             negative_prompt=config.negative_prompt,
             mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"),
@@ -155,7 +145,6 @@ class SD(InpaintModel):
             callback=self.callback,
             height=img_h,
             width=img_w,
-            **_kwargs
         ).images[0]
 
         output = (output * 255).round().astype("uint8")
@@ -217,4 +206,7 @@ class SD(InpaintModel):
 
 class SD15(SD):
     model_id_or_path = "runwayml/stable-diffusion-inpainting"
-    image_key = "image"
+
+
+class SD2(SD):
+    model_id_or_path = "stabilityai/stable-diffusion-2-inpainting"

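Note: the diff does not show where `model_id_or_path` is consumed, so the loading call below is a
hedged sketch rather than the commit's implementation; it only illustrates that the new SD2 class
points at the stabilityai/stable-diffusion-2-inpainting repo on the Hugging Face Hub.

    import torch
    from diffusers import StableDiffusionInpaintPipeline

    # Load the inpainting pipeline named by SD2.model_id_or_path (dtype assumed).
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting",
        torch_dtype=torch.float16,
    )
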
@@ -5,12 +5,13 @@ from lama_cleaner.model.lama import LaMa
 from lama_cleaner.model.ldm import LDM
 from lama_cleaner.model.manga import Manga
 from lama_cleaner.model.mat import MAT
-from lama_cleaner.model.sd import SD15
+from lama_cleaner.model.sd import SD15, SD2
 from lama_cleaner.model.zits import ZITS
 from lama_cleaner.model.opencv2 import OpenCV2
 from lama_cleaner.schema import Config
 
-models = {"lama": LaMa, "ldm": LDM, "zits": ZITS, "mat": MAT, "fcf": FcF, "sd1.5": SD15, "cv2": OpenCV2, "manga": Manga}
+models = {"lama": LaMa, "ldm": LDM, "zits": ZITS, "mat": MAT, "fcf": FcF, "sd1.5": SD15, "cv2": OpenCV2, "manga": Manga,
+          "sd2": SD2}
 
 
 class ModelManager:

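Note: a minimal sketch (not from this commit) of how a name-to-class registry like the `models`
dict above is typically consumed; the constructor arguments here are assumptions, not
ModelManager's actual signature.

    def build_model(name: str, device: str):
        # Look up the class registered under `name` (e.g. "sd2" -> SD2) and instantiate it.
        if name not in models:
            raise NotImplementedError(f"Not supported model: {name}")
        return models[name](device)
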
@@ -10,7 +10,7 @@ def parse_args():
     parser.add_argument(
         "--model",
         default="lama",
-        choices=["lama", "ldm", "zits", "mat", "fcf", "sd1.5", "cv2", "manga"],
+        choices=["lama", "ldm", "zits", "mat", "fcf", "sd1.5", "cv2", "manga", "sd2"],
     )
     parser.add_argument(
         "--hf_access_token",
@@ -37,7 +37,7 @@ def parse_args():
         action="store_true",
         help="Enable xFormers optimizations. Requires that xformers package has been installed. See: https://github.com/facebookresearch/xformers"
     )
-    parser.add_argument("--device", default="cuda", type=str, choices=["cuda", "cpu"])
+    parser.add_argument("--device", default="cuda", type=str, choices=["cuda", "cpu", "mps"])
     parser.add_argument("--gui", action="store_true", help="Launch as desktop app")
     parser.add_argument(
         "--gui-size",
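
Note: a small hedged check, not part of this diff, that callers often pair with the new "mps"
device choice; torch only reports the Metal backend as available on Apple-silicon builds.

    import torch

    # Fall back to CPU when the Metal (MPS) backend is not available.
    device = "mps" if torch.backends.mps.is_available() else "cpu"
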
@@ -59,7 +59,7 @@ def parse_args():
         if imghdr.what(args.input) is None:
             parser.error(f"invalid --input: {args.input} is not a valid image file")
 
-    if args.model.startswith("sd") and not args.sd_run_local:
+    if args.model == 'sd1.5' and not args.sd_run_local:
         if not args.hf_access_token.startswith("hf_"):
             parser.error(
                 f"sd(stable-diffusion) model requires huggingface access token. Check how to get token from: https://huggingface.co/docs/hub/security-tokens"

@@ -25,6 +25,7 @@ class SDSampler(str, Enum):
     k_lms = "k_lms"
     k_euler = 'k_euler'
     k_euler_a = 'k_euler_a'
+    dpm_plus_plus = 'dpm++'
 
 
 class Config(BaseModel):

@@ -10,5 +10,5 @@ pytest
 yacs
 markupsafe==2.0.1
 scikit-image==0.19.3
-diffusers[torch]==0.7.2
+diffusers[torch]==0.9
 transformers==4.21.0