2023-12-01 03:15:35 +01:00
|
|
|
import torch
|
2024-01-09 15:58:21 +01:00
|
|
|
from transformers import PreTrainedModel
|
|
|
|
|
2024-01-05 09:40:06 +01:00
|
|
|
from ..utils import torch_gc
|
2023-12-01 03:15:35 +01:00
|
|
|
|
|
|
|
|
2024-01-09 15:58:21 +01:00
|
|
|
class CPUTextEncoderWrapper(PreTrainedModel):
    """Keep a text encoder on CPU in float32, but hand results back in the caller's dtype/device.

    The wrapped encoder is moved to CPU and upcast to float32 (CPU kernels do
    not support float16). On every call, inputs are routed to the encoder's
    device and each output tensor is moved back to the input's device and cast
    to the requested ``torch_dtype`` before being returned.
    """

    def __init__(self, text_encoder, torch_dtype):
        super().__init__(text_encoder.config)
        self.config = text_encoder.config
        # CPU has no float16 kernels, so the encoder is forced to float32 here.
        encoder_cpu = text_encoder.to(torch.device("cpu"), non_blocking=True)
        self.text_encoder = encoder_cpu.to(torch.float32, non_blocking=True)
        # dtype the caller expects outputs in (e.g. float16 on GPU pipelines).
        self.torch_dtype = torch_dtype
        # Drop the original reference and reclaim device memory immediately.
        del text_encoder
        torch_gc()

    def __call__(self, x, **kwargs):
        """Encode ``x`` on CPU; return outputs on ``x``'s device in ``self.torch_dtype``."""
        caller_device = x.device

        def _restore(tensor):
            # Move back to where the input lived, then cast to the target dtype.
            return tensor.to(caller_device).to(self.torch_dtype)

        outputs = self.text_encoder(x.to(self.text_encoder.device), **kwargs)
        for key, value in outputs.items():
            if isinstance(value, tuple):
                # NOTE: tuples come back as lists, matching the original behavior.
                outputs[key] = [_restore(item) for item in value]
            else:
                outputs[key] = _restore(value)
        return outputs

    @property
    def dtype(self):
        # Report the caller-facing dtype, not the encoder's internal float32.
        return self.torch_dtype