clean code: get_torch_dtype; mps use float16 by default

This commit is contained in:
Qing 2024-01-08 23:53:20 +08:00
parent 6f4ce66793
commit a71c3fbe1b
8 changed files with 23 additions and 28 deletions

View File

@ -13,7 +13,7 @@ from .helper.controlnet_preprocess import (
make_inpaint_control_image, make_inpaint_control_image,
) )
from .helper.cpu_text_encoder import CPUTextEncoderWrapper from .helper.cpu_text_encoder import CPUTextEncoderWrapper
from .utils import get_scheduler, handle_from_pretrained_exceptions from .utils import get_scheduler, handle_from_pretrained_exceptions, get_torch_dtype
class ControlNet(DiffusionInpaintModel): class ControlNet(DiffusionInpaintModel):
@ -36,7 +36,6 @@ class ControlNet(DiffusionInpaintModel):
raise NotImplementedError(f"Unsupported controlnet lcm model {self.model_info}") raise NotImplementedError(f"Unsupported controlnet lcm model {self.model_info}")
def init_model(self, device: torch.device, **kwargs): def init_model(self, device: torch.device, **kwargs):
fp16 = not kwargs.get("no_half", False)
model_info = kwargs["model_info"] model_info = kwargs["model_info"]
controlnet_method = kwargs["controlnet_method"] controlnet_method = kwargs["controlnet_method"]
@ -54,8 +53,7 @@ class ControlNet(DiffusionInpaintModel):
) )
) )
use_gpu = device == torch.device("cuda") and torch.cuda.is_available() use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
self.torch_dtype = torch_dtype self.torch_dtype = torch_dtype
if model_info.model_type in [ if model_info.model_type in [

View File

@ -6,6 +6,7 @@ from loguru import logger
from iopaint.const import INSTRUCT_PIX2PIX_NAME from iopaint.const import INSTRUCT_PIX2PIX_NAME
from .base import DiffusionInpaintModel from .base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest from iopaint.schema import InpaintRequest
from .utils import get_torch_dtype
class InstructPix2Pix(DiffusionInpaintModel): class InstructPix2Pix(DiffusionInpaintModel):
@ -16,7 +17,7 @@ class InstructPix2Pix(DiffusionInpaintModel):
def init_model(self, device: torch.device, **kwargs): def init_model(self, device: torch.device, **kwargs):
from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers import StableDiffusionInstructPix2PixPipeline
fp16 = not kwargs.get("no_half", False) use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
model_kwargs = {} model_kwargs = {}
if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
@ -29,8 +30,6 @@ class InstructPix2Pix(DiffusionInpaintModel):
) )
) )
use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained( self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
self.name, variant="fp16", torch_dtype=torch_dtype, **model_kwargs self.name, variant="fp16", torch_dtype=torch_dtype, **model_kwargs
) )

View File

@ -6,6 +6,7 @@ import torch
from iopaint.const import KANDINSKY22_NAME from iopaint.const import KANDINSKY22_NAME
from .base import DiffusionInpaintModel from .base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest from iopaint.schema import InpaintRequest
from .utils import get_torch_dtype
class Kandinsky(DiffusionInpaintModel): class Kandinsky(DiffusionInpaintModel):
@ -15,9 +16,7 @@ class Kandinsky(DiffusionInpaintModel):
def init_model(self, device: torch.device, **kwargs): def init_model(self, device: torch.device, **kwargs):
from diffusers import AutoPipelineForInpainting from diffusers import AutoPipelineForInpainting
fp16 = not kwargs.get("no_half", False) use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
model_kwargs = { model_kwargs = {
"torch_dtype": torch_dtype, "torch_dtype": torch_dtype,

View File

@ -7,6 +7,7 @@ from loguru import logger
from iopaint.helper import decode_base64_to_image from iopaint.helper import decode_base64_to_image
from .base import DiffusionInpaintModel from .base import DiffusionInpaintModel
from iopaint.schema import InpaintRequest from iopaint.schema import InpaintRequest
from .utils import get_torch_dtype
class PaintByExample(DiffusionInpaintModel): class PaintByExample(DiffusionInpaintModel):
@ -17,9 +18,7 @@ class PaintByExample(DiffusionInpaintModel):
def init_model(self, device: torch.device, **kwargs): def init_model(self, device: torch.device, **kwargs):
from diffusers import DiffusionPipeline from diffusers import DiffusionPipeline
fp16 = not kwargs.get("no_half", False) use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
model_kwargs = {} model_kwargs = {}
if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):

View File

@ -6,7 +6,7 @@ from loguru import logger
from ..base import DiffusionInpaintModel from ..base import DiffusionInpaintModel
from ..helper.cpu_text_encoder import CPUTextEncoderWrapper from ..helper.cpu_text_encoder import CPUTextEncoderWrapper
from ..utils import handle_from_pretrained_exceptions from ..utils import handle_from_pretrained_exceptions, get_torch_dtype
from iopaint.schema import InpaintRequest from iopaint.schema import InpaintRequest
from .powerpaint_tokenizer import add_task_to_prompt from .powerpaint_tokenizer import add_task_to_prompt
from ...const import POWERPAINT_NAME from ...const import POWERPAINT_NAME
@ -22,7 +22,7 @@ class PowerPaint(DiffusionInpaintModel):
from .pipeline_powerpaint import StableDiffusionInpaintPipeline from .pipeline_powerpaint import StableDiffusionInpaintPipeline
from .powerpaint_tokenizer import PowerPaintTokenizer from .powerpaint_tokenizer import PowerPaintTokenizer
fp16 = not kwargs.get("no_half", False) use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
model_kwargs = {} model_kwargs = {}
if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
logger.info("Disable Stable Diffusion Model NSFW checker") logger.info("Disable Stable Diffusion Model NSFW checker")
@ -34,9 +34,6 @@ class PowerPaint(DiffusionInpaintModel):
) )
) )
use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
self.model = handle_from_pretrained_exceptions( self.model = handle_from_pretrained_exceptions(
StableDiffusionInpaintPipeline.from_pretrained, StableDiffusionInpaintPipeline.from_pretrained,
pretrained_model_name_or_path=self.name, pretrained_model_name_or_path=self.name,

View File

@ -5,7 +5,7 @@ from loguru import logger
from .base import DiffusionInpaintModel from .base import DiffusionInpaintModel
from .helper.cpu_text_encoder import CPUTextEncoderWrapper from .helper.cpu_text_encoder import CPUTextEncoderWrapper
from .utils import handle_from_pretrained_exceptions from .utils import handle_from_pretrained_exceptions, get_torch_dtype
from iopaint.schema import InpaintRequest, ModelType from iopaint.schema import InpaintRequest, ModelType
@ -17,7 +17,7 @@ class SD(DiffusionInpaintModel):
def init_model(self, device: torch.device, **kwargs): def init_model(self, device: torch.device, **kwargs):
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
fp16 = not kwargs.get("no_half", False) use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
model_kwargs = {} model_kwargs = {}
if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False): if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
@ -29,8 +29,6 @@ class SD(DiffusionInpaintModel):
requires_safety_checker=False, requires_safety_checker=False,
) )
) )
use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
if self.model_info.is_single_file_diffusers: if self.model_info.is_single_file_diffusers:
if self.model_info.model_type == ModelType.DIFFUSERS_SD: if self.model_info.model_type == ModelType.DIFFUSERS_SD:

View File

@ -9,7 +9,7 @@ from loguru import logger
from iopaint.schema import InpaintRequest, ModelType from iopaint.schema import InpaintRequest, ModelType
from .base import DiffusionInpaintModel from .base import DiffusionInpaintModel
from .utils import handle_from_pretrained_exceptions from .utils import handle_from_pretrained_exceptions, get_torch_dtype
class SDXL(DiffusionInpaintModel): class SDXL(DiffusionInpaintModel):
@ -22,10 +22,7 @@ class SDXL(DiffusionInpaintModel):
def init_model(self, device: torch.device, **kwargs): def init_model(self, device: torch.device, **kwargs):
from diffusers.pipelines import StableDiffusionXLInpaintPipeline from diffusers.pipelines import StableDiffusionXLInpaintPipeline
fp16 = not kwargs.get("no_half", False) use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
if self.model_info.model_type == ModelType.DIFFUSERS_SDXL: if self.model_info.model_type == ModelType.DIFFUSERS_SDXL:
num_in_channels = 4 num_in_channels = 4

View File

@ -1,4 +1,3 @@
import copy
import gc import gc
import math import math
import random import random
@ -994,3 +993,12 @@ def handle_from_pretrained_exceptions(func, **kwargs):
raise e raise e
except Exception as e: except Exception as e:
raise e raise e
def get_torch_dtype(device, no_half: bool) -> tuple:
    """Decide whether the device counts as a CUDA GPU and which dtype to use.

    Args:
        device: a ``torch.device`` or device string (e.g. "cuda", "cuda:0",
            "mps", "cpu").
        no_half: when True, always use float32 even on half-capable devices.

    Returns:
        (use_gpu, torch_dtype): ``use_gpu`` is True only for CUDA devices;
        ``torch_dtype`` is float16 on CUDA/MPS (unless ``no_half``), else
        float32.  MPS gets float16 by default but is not reported as "gpu"
        because downstream CUDA-only paths (e.g. cpu_offload) must not run
        on it.
    """
    device = str(device)
    use_fp16 = not no_half
    # Match by device *type* so indexed devices ("cuda:0", "mps:0") are
    # recognized too — exact equality would misclassify them as CPU.
    use_gpu = device.startswith("cuda")
    if (use_gpu or device.startswith("mps")) and use_fp16:
        return use_gpu, torch.float16
    return use_gpu, torch.float32