IOPaint/iopaint/model/helper/cpu_text_encoder.py

41 lines
1.4 KiB
Python
Raw Normal View History

2023-12-01 03:15:35 +01:00
import torch
from transformers import PreTrainedModel
2024-01-05 09:40:06 +01:00
from ..utils import torch_gc
2023-12-01 03:15:35 +01:00
class CPUTextEncoderWrapper(PreTrainedModel):
    """Run a text encoder on CPU in float32 while presenting the pipeline's dtype/device.

    CPU inference does not support float16 for several ops, so the wrapped
    encoder is moved to CPU and upcast to float32. Every output tensor is
    cast back to ``torch_dtype`` and moved to the caller's device, so the
    surrounding pipeline is unaffected by the CPU offload.
    """

    def __init__(self, text_encoder, torch_dtype):
        """
        Args:
            text_encoder: A transformers text-encoder model (any device/dtype).
            torch_dtype: The dtype the pipeline expects outputs in (e.g. float16).
        """
        super().__init__(text_encoder.config)
        self.config = text_encoder.config
        # Remember the original device so the `.device` property keeps
        # reporting it even though the weights now live on CPU.
        self._device = text_encoder.device
        # cpu not support float16 — move to CPU and upcast to float32 in a
        # single Module.to(device, dtype, non_blocking) call.
        self.text_encoder = text_encoder.to(
            torch.device("cpu"), torch.float32, non_blocking=True
        )
        self.torch_dtype = torch_dtype
        del text_encoder
        torch_gc()

    def __call__(self, x, **kwargs):
        """Encode ``x`` on CPU; return outputs on ``x.device`` in ``torch_dtype``.

        Args:
            x: Token-id tensor; moved to the encoder's (CPU) device for the
                forward pass.
            **kwargs: Forwarded unchanged to the wrapped encoder.

        Returns:
            The encoder's output object with every tensor moved back to
            ``x.device`` and cast to ``self.torch_dtype``.
        """
        input_device = x.device
        original_output = self.text_encoder(x.to(self.text_encoder.device), **kwargs)
        for k, v in original_output.items():
            if isinstance(v, tuple):
                # Preserve the tuple type (hidden_states / attentions are
                # tuples in transformers outputs); the previous code silently
                # converted them to lists, changing the output's shape-type.
                original_output[k] = tuple(
                    t.to(input_device).to(self.torch_dtype) for t in v
                )
            else:
                original_output[k] = v.to(input_device).to(self.torch_dtype)
        return original_output

    @property
    def dtype(self):
        # Report the pipeline's dtype, not the float32 the CPU weights use,
        # so dtype-based dispatch in the pipeline stays consistent.
        return self.torch_dtype

    @property
    def device(self) -> torch.device:
        """
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return self._device