make mask blur work

Qing 2022-09-22 21:50:41 +08:00
parent 19f1f07c95
commit 26d938839c
5 changed files with 19 additions and 13 deletions

View File

@@ -39,11 +39,11 @@ export default async function inpaint(
fd.append('croperHeight', croperRect.height.toString())
fd.append('croperWidth', croperRect.width.toString())
fd.append('useCroper', settings.showCroper ? 'true' : 'false')
+ fd.append('sdMaskBlur', settings.sdMaskBlur.toString())
fd.append('sdStrength', settings.sdStrength.toString())
fd.append('sdSteps', settings.sdSteps.toString())
fd.append('sdGuidanceScale', settings.sdGuidanceScale.toString())
fd.append('sdSampler', settings.sdSampler.toString())
// fd.append('sdSeed', settings.sdSeedFixed ? settings.sdSeed.toString() : '-1')
fd.append('sdSeed', seed ? seed.toString() : '-1')
if (sizeLimit === undefined) {

View File

@@ -231,7 +231,7 @@ export const settingStateDefault: Settings = {
zitsWireframe: true,
// SD
- sdMaskBlur: 0,
+ sdMaskBlur: 5,
sdMode: SDMode.inpainting,
sdStrength: 0.75,
sdSteps: 50,

View File

@@ -48,7 +48,7 @@ class SD(InpaintModel):
self.model = StableDiffusionInpaintPipeline.from_pretrained(
self.model_id_or_path,
- revision="fp16" if torch.cuda.is_available() else 'main',
+ revision="fp16" if torch.cuda.is_available() else "main",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
use_auth_token=kwargs["hf_access_token"],
)
@@ -75,7 +75,7 @@ class SD(InpaintModel):
#
# image = torch.from_numpy(image).unsqueeze(0).to(self.device)
# mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
if config.sd_sampler == SDSampler.ddim:
scheduler = DDIMScheduler(
beta_start=0.00085,
@@ -91,7 +91,7 @@ class SD(InpaintModel):
"beta_start": 0.00085,
"beta_end": 0.012,
"num_train_timesteps": 1000,
- "skip_prk_steps": True
+ "skip_prk_steps": True,
}
scheduler = PNDMScheduler(**PNDM_kwargs)
else:
@@ -105,6 +105,10 @@ class SD(InpaintModel):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
+ if config.sd_mask_blur != 0:
+     k = 2 * config.sd_mask_blur + 1
+     mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]
output = self.model(
prompt=config.prompt,
init_image=PIL.Image.fromarray(image),
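
The hunk above is where the new setting actually takes effect: when sd_mask_blur is non-zero, the single-channel mask is Gaussian-blurred before being handed to the inpainting pipeline, feathering the boundary between the inpainted region and the untouched pixels. Below is a minimal standalone sketch of that step, assuming an 8-bit (H, W, 1) mask as the [:, :, np.newaxis] in the diff suggests; the helper name and the demo mask are illustrative only.

import cv2
import numpy as np

def blur_mask(mask: np.ndarray, sd_mask_blur: int) -> np.ndarray:
    """Feather the mask edges the same way the diff above does.

    mask: (H, W, 1) uint8 array, non-zero inside the region to inpaint.
    sd_mask_blur: blur radius from the request; 0 leaves the mask untouched.
    """
    if sd_mask_blur == 0:
        return mask
    # GaussianBlur needs an odd kernel size, so radius r maps to (2r + 1);
    # the new frontend default of 5 therefore gives an 11x11 kernel.
    k = 2 * sd_mask_blur + 1
    # OpenCV drops the trailing channel axis for single-channel input,
    # so it is restored with np.newaxis, matching the diff.
    return cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]

# Example: a hard-edged square mask comes back with soft, feathered edges.
mask = np.zeros((128, 128, 1), dtype=np.uint8)
mask[32:96, 32:96] = 255
soft = blur_mask(mask, sd_mask_blur=5)
assert soft.shape == (128, 128, 1)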

View File

@@ -4,19 +4,19 @@ from pydantic import BaseModel
class HDStrategy(str, Enum):
- ORIGINAL = 'Original'
- RESIZE = 'Resize'
- CROP = 'Crop'
+ ORIGINAL = "Original"
+ RESIZE = "Resize"
+ CROP = "Crop"
class LDMSampler(str, Enum):
- ddim = 'ddim'
- plms = 'plms'
+ ddim = "ddim"
+ plms = "plms"
class SDSampler(str, Enum):
- ddim = 'ddim'
- pndm = 'pndm'
+ ddim = "ddim"
+ pndm = "pndm"
class Config(BaseModel):
@@ -28,7 +28,7 @@ class Config(BaseModel):
hd_strategy_crop_trigger_size: int
hd_strategy_resize_limit: int
- prompt: str = ''
+ prompt: str = ""
# Always values at the original image scale
use_croper: bool = False
croper_x: int = None
@@ -37,6 +37,7 @@ class Config(BaseModel):
croper_width: int = None
# sd
+ sd_mask_blur: int = 0
sd_strength: float = 0.75
sd_steps: int = 50
sd_guidance_scale: float = 7.5

View File

@@ -118,6 +118,7 @@ def process():
croper_y=form["croperY"],
croper_height=form["croperHeight"],
croper_width=form["croperWidth"],
+ sd_mask_blur=form["sdMaskBlur"],
sd_strength=form["sdStrength"],
sd_steps=form["sdSteps"],
sd_guidance_scale=form["sdGuidanceScale"],
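
The form values here are strings, so the new sd_mask_blur=form["sdMaskBlur"] line relies on pydantic coercing them into the typed fields declared on Config. A cut-down stand-in model illustrating the two properties this commit relies on (the name ConfigSketch and the field subset are illustrative; the project's real Config has many more required fields):

from pydantic import BaseModel

class ConfigSketch(BaseModel):
    # Mirrors the field added to Config in this commit: blur radius for the
    # SD inpainting mask, where 0 means "no blur".
    sd_mask_blur: int = 0
    sd_strength: float = 0.75
    sd_steps: int = 50

# Form values are strings; pydantic coerces "5" -> 5 for the int field.
cfg = ConfigSketch(sd_mask_blur="5", sd_strength="0.75", sd_steps="50")
assert cfg.sd_mask_blur == 5 and isinstance(cfg.sd_mask_blur, int)

# Leaving the field out keeps the server-side default of 0 (blur disabled),
# even though the frontend default is now 5.
assert ConfigSketch().sd_mask_blur == 0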