add euler sampler

Qing 2022-11-15 21:09:51 +08:00
parent 6503d7ec32
commit d7c3149f67
5 changed files with 144 additions and 86 deletions

View File

@@ -231,6 +231,8 @@ export enum SDSampler {
   ddim = 'ddim',
   pndm = 'pndm',
   klms = 'k_lms',
+  kEuler = 'k_euler',
+  kEulerA = 'k_euler_a',
 }
 
 export enum SDMode {

View File

@@ -4,7 +4,8 @@ import PIL.Image
 import cv2
 import numpy as np
 import torch
-from diffusers import PNDMScheduler, DDIMScheduler, LMSDiscreteScheduler
+from diffusers import PNDMScheduler, DDIMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, \
+    EulerAncestralDiscreteScheduler
 from loguru import logger
 
 from lama_cleaner.model.base import InpaintModel
@@ -98,25 +99,27 @@ class SD(InpaintModel):
         # image = torch.from_numpy(image).unsqueeze(0).to(self.device)
         # mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
 
+        scheduler_kwargs = dict(
+            beta_schedule="scaled_linear",
+            beta_start=0.00085,
+            beta_end=0.012,
+            num_train_timesteps=1000,
+        )
+
         if config.sd_sampler == SDSampler.ddim:
             scheduler = DDIMScheduler(
-                beta_start=0.00085,
-                beta_end=0.012,
-                beta_schedule="scaled_linear",
+                **scheduler_kwargs,
                 clip_sample=False,
                 set_alpha_to_one=False,
             )
         elif config.sd_sampler == SDSampler.pndm:
-            PNDM_kwargs = {
-                "beta_schedule": "scaled_linear",
-                "beta_start": 0.00085,
-                "beta_end": 0.012,
-                "num_train_timesteps": 1000,
-                "skip_prk_steps": True,
-            }
-            scheduler = PNDMScheduler(**PNDM_kwargs)
+            scheduler = PNDMScheduler(**scheduler_kwargs, skip_prk_steps=True)
         elif config.sd_sampler == SDSampler.k_lms:
-            scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
+            scheduler = LMSDiscreteScheduler(**scheduler_kwargs)
+        elif config.sd_sampler == SDSampler.k_euler:
+            scheduler = EulerDiscreteScheduler(**scheduler_kwargs)
+        elif config.sd_sampler == SDSampler.k_euler_a:
+            scheduler = EulerAncestralDiscreteScheduler(**scheduler_kwargs)
         else:
             raise ValueError(config.sd_sampler)
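
Note (not part of this diff): the two new samplers map onto diffusers scheduler classes that can be dropped into any Stable Diffusion pipeline. A minimal sketch, assuming diffusers >= 0.7.0 (where EulerDiscreteScheduler and EulerAncestralDiscreteScheduler first shipped); the model id and the scheduler override via from_pretrained are illustrative, not taken from this repo:

# Sketch: build the two new schedulers from the same shared kwargs the diff
# introduces, then hand one to a plain diffusers inpainting pipeline.
from diffusers import (
    StableDiffusionInpaintPipeline,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
)

scheduler_kwargs = dict(
    beta_schedule="scaled_linear",
    beta_start=0.00085,
    beta_end=0.012,
    num_train_timesteps=1000,
)

# k_euler -> deterministic Euler method, k_euler_a -> its ancestral (stochastic) variant
k_euler = EulerDiscreteScheduler(**scheduler_kwargs)
k_euler_a = EulerAncestralDiscreteScheduler(**scheduler_kwargs)

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # illustrative model id
    scheduler=k_euler_a,  # standard diffusers component override, not this repo's code path
)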

View File

@@ -18,6 +18,8 @@ class SDSampler(str, Enum):
     ddim = "ddim"
     pndm = "pndm"
     k_lms = "k_lms"
+    k_euler = 'k_euler'
+    k_euler_a = 'k_euler_a'
 
 
 class Config(BaseModel):
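
A small sketch (an assumption, not shown in the diff) of why the string values matter: the frontend enum above sends 'k_euler' / 'k_euler_a' over the API, and because the backend SDSampler subclasses str, lookup by value resolves those strings to the new members, so both enums must stay in sync.

from lama_cleaner.schema import SDSampler

# Enum lookup by value: raw strings coming from the frontend resolve to the
# new members added in this commit.
assert SDSampler("k_euler") is SDSampler.k_euler
assert SDSampler("k_euler_a") is SDSampler.k_euler_a
assert SDSampler.k_euler_a.value == "k_euler_a"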

View File

@@ -161,79 +161,6 @@ def test_fcf(strategy):
     )
 
 
-@pytest.mark.parametrize("sd_device", ['cpu', 'cuda'])
-@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
-@pytest.mark.parametrize("sampler", [SDSampler.ddim, SDSampler.pndm, SDSampler.k_lms])
-@pytest.mark.parametrize("cpu_textencoder", [True, False])
-@pytest.mark.parametrize("disable_nsfw", [True, False])
-def test_runway_sd_1_5(sd_device, strategy, sampler, cpu_textencoder, disable_nsfw):
-    def callback(i, t, latents):
-        print(f"sd_step_{i}")
-
-    if sd_device == 'cuda' and not torch.cuda.is_available():
-        return
-
-    sd_steps = 50
-    model = ModelManager(name="sd1.5",
-                         device=torch.device(sd_device),
-                         hf_access_token="",
-                         sd_run_local=True,
-                         sd_disable_nsfw=disable_nsfw,
-                         sd_cpu_textencoder=cpu_textencoder,
-                         callback=callback)
-    cfg = get_config(strategy, prompt='a fox sitting on a bench', sd_steps=sd_steps)
-    cfg.sd_sampler = sampler
-
-    name = f"{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
-
-    assert_equal(
-        model,
-        cfg,
-        f"runway_sd_{strategy.capitalize()}_{name}.png",
-        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
-        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
-        fx=1.3
-    )
-
-
-@pytest.mark.parametrize("sd_device", ['cuda'])
-@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
-@pytest.mark.parametrize("sampler", [SDSampler.ddim])
-def test_runway_sd_1_5_negative_prompt(sd_device, strategy, sampler):
-    def callback(i, t, latents):
-        pass
-
-    if sd_device == 'cuda' and not torch.cuda.is_available():
-        return
-
-    sd_steps = 50
-    model = ModelManager(name="sd1.5",
-                         device=torch.device(sd_device),
-                         hf_access_token="",
-                         sd_run_local=True,
-                         sd_disable_nsfw=True,
-                         sd_cpu_textencoder=True,
-                         callback=callback)
-    cfg = get_config(
-        strategy,
-        sd_steps=sd_steps,
-        prompt='Face of a fox, high resolution, sitting on a park bench',
-        negative_prompt='orange, yellow, small',
-        sd_sampler=sampler
-    )
-
-    name = f"{sampler}_negative_prompt"
-
-    assert_equal(
-        model,
-        cfg,
-        f"runway_sd_{strategy.capitalize()}_{name}.png",
-        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
-        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
-        fx=1
-    )
-
-
 @pytest.mark.parametrize(
     "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
 )

View File

@@ -0,0 +1,124 @@
import os
from pathlib import Path

import cv2
import pytest
import torch

from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config, HDStrategy, LDMSampler, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal

current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / 'result'
save_dir.mkdir(exist_ok=True, parents=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)


@pytest.mark.parametrize("sd_device", ['cpu', 'cuda'])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize("cpu_textencoder", [True, False])
@pytest.mark.parametrize("disable_nsfw", [True, False])
def test_runway_sd_1_5_ddim(sd_device, strategy, sampler, cpu_textencoder, disable_nsfw):
    def callback(i, t, latents):
        print(f"sd_step_{i}")

    if sd_device == 'cuda' and not torch.cuda.is_available():
        return

    sd_steps = 50 if sd_device == 'cuda' else 1
    model = ModelManager(name="sd1.5",
                         device=torch.device(sd_device),
                         hf_access_token="",
                         sd_run_local=True,
                         sd_disable_nsfw=disable_nsfw,
                         sd_cpu_textencoder=cpu_textencoder,
                         callback=callback)
    cfg = get_config(strategy, prompt='a fox sitting on a bench', sd_steps=sd_steps)
    cfg.sd_sampler = sampler

    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3
    )


@pytest.mark.parametrize("sd_device", ['cuda'])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.pndm, SDSampler.k_lms, SDSampler.k_euler, SDSampler.k_euler_a])
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [True])
def test_runway_sd_1_5(sd_device, strategy, sampler, cpu_textencoder, disable_nsfw):
    def callback(i, t, latents):
        print(f"sd_step_{i}")

    if sd_device == 'cuda' and not torch.cuda.is_available():
        return

    sd_steps = 50 if sd_device == 'cuda' else 1
    model = ModelManager(name="sd1.5",
                         device=torch.device(sd_device),
                         hf_access_token="",
                         sd_run_local=True,
                         sd_disable_nsfw=disable_nsfw,
                         sd_cpu_textencoder=cpu_textencoder,
                         callback=callback)
    cfg = get_config(strategy, prompt='a fox sitting on a bench', sd_steps=sd_steps)
    cfg.sd_sampler = sampler

    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3
    )


@pytest.mark.parametrize("sd_device", ['cuda'])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_1_5_negative_prompt(sd_device, strategy, sampler):
    def callback(i, t, latents):
        pass

    if sd_device == 'cuda' and not torch.cuda.is_available():
        return

    sd_steps = 50
    model = ModelManager(name="sd1.5",
                         device=torch.device(sd_device),
                         hf_access_token="",
                         sd_run_local=True,
                         sd_disable_nsfw=True,
                         sd_cpu_textencoder=True,
                         callback=callback)
    cfg = get_config(
        strategy,
        sd_steps=sd_steps,
        prompt='Face of a fox, high resolution, sitting on a park bench',
        negative_prompt='orange, yellow, small',
        sd_sampler=sampler
    )

    name = f"{sampler}_negative_prompt"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1
    )
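
One way to run only the new Euler cases locally, as a sketch: the new file's path is not shown in this view, so the path below is an assumption, and '-k k_euler' also matches the k_euler_a parametrizations.

import pytest

# Hypothetical path for the new test module added by this commit.
pytest.main(["lama_cleaner/tests/test_sd_model.py", "-k", "k_euler", "-s"])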