IOPaint/iopaint/model/power_paint/v2/unet_2d_blocks.py

# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Optional, Tuple
import torch
from diffusers.utils import is_torch_version, logging
from diffusers.utils.torch_utils import apply_freeu
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
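
# The forwards below track the corresponding diffusers 2D UNet block forwards
# (CrossAttnDownBlock2D, DownBlock2D, CrossAttnUpBlock2D, UpBlock2D) and extend
# them with extra per-stage residuals (down_block_add_samples /
# up_block_add_samples) that are added to the hidden states as they flow through
# the block. They are meant to replace the forward methods of a UNet's blocks;
# a minimal sketch of that binding (an assumption here; the actual wiring lives
# elsewhere in PowerPaint v2):
#
#     for block in unet.down_blocks:
#         if hasattr(block, "attentions"):
#             block.forward = CrossAttnDownBlock2D_forward.__get__(block)
#         else:
#             block.forward = DownBlock2D_forward.__get__(block)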
def CrossAttnDownBlock2D_forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
additional_residuals: Optional[torch.FloatTensor] = None,
    down_block_add_samples: Optional[List[torch.FloatTensor]] = None,
) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
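    # Mirrors diffusers' CrossAttnDownBlock2D.forward, with one addition: when
    # down_block_add_samples is provided, one residual is popped from the front
    # of the list and added to hidden_states after each resnet/attention pair
    # and again after the downsamplers.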
output_states = ()
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
blocks = list(zip(self.resnets, self.attentions))
for i, (resnet, attn) in enumerate(blocks):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
else:
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
# apply additional residuals to the output of the last pair of resnet and attention blocks
if i == len(blocks) - 1 and additional_residuals is not None:
hidden_states = hidden_states + additional_residuals
if down_block_add_samples is not None:
hidden_states = hidden_states + down_block_add_samples.pop(0)
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, scale=lora_scale)
if down_block_add_samples is not None:
hidden_states = hidden_states + down_block_add_samples.pop(
0
) # todo: add before or after
output_states = output_states + (hidden_states,)
    return hidden_states, output_states


def DownBlock2D_forward(
self,
hidden_states: torch.FloatTensor,
temb: Optional[torch.FloatTensor] = None,
scale: float = 1.0,
    down_block_add_samples: Optional[List[torch.FloatTensor]] = None,
) -> Tuple[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]:
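    # Mirrors diffusers' DownBlock2D.forward, popping one residual from
    # down_block_add_samples (when provided) after each resnet and after the
    # downsamplers, and adding it to hidden_states.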
output_states = ()
for resnet in self.resnets:
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
use_reentrant=False,
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb, scale=scale)
if down_block_add_samples is not None:
hidden_states = hidden_states + down_block_add_samples.pop(0)
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, scale=scale)
if down_block_add_samples is not None:
hidden_states = hidden_states + down_block_add_samples.pop(
0
) # todo: add before or after
output_states = output_states + (hidden_states,)
    return hidden_states, output_states


def CrossAttnUpBlock2D_forward(
self,
hidden_states: torch.FloatTensor,
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
temb: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
return_res_samples: Optional[bool] = False,
    up_block_add_samples: Optional[List[torch.FloatTensor]] = None,
) -> torch.FloatTensor:
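    # Mirrors diffusers' CrossAttnUpBlock2D.forward with two additions: optional
    # up_block_add_samples residuals consumed after each resnet/attention pair
    # (and after upsampling), and an optional tuple of per-stage hidden states
    # returned when return_res_samples is True.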
lora_scale = (
cross_attention_kwargs.get("scale", 1.0)
if cross_attention_kwargs is not None
else 1.0
)
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
if return_res_samples:
output_states = ()
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
ckpt_kwargs: Dict[str, Any] = (
{"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
**ckpt_kwargs,
)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
else:
hidden_states = resnet(hidden_states, temb, scale=lora_scale)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
if return_res_samples:
output_states = output_states + (hidden_states,)
if up_block_add_samples is not None:
hidden_states = hidden_states + up_block_add_samples.pop(0)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size, scale=lora_scale)
if return_res_samples:
output_states = output_states + (hidden_states,)
if up_block_add_samples is not None:
hidden_states = hidden_states + up_block_add_samples.pop(0)
if return_res_samples:
return hidden_states, output_states
else:
        return hidden_states


def UpBlock2D_forward(
self,
hidden_states: torch.FloatTensor,
res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
temb: Optional[torch.FloatTensor] = None,
upsample_size: Optional[int] = None,
scale: float = 1.0,
return_res_samples: Optional[bool] = False,
    up_block_add_samples: Optional[List[torch.FloatTensor]] = None,
) -> torch.FloatTensor:
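    # Mirrors diffusers' UpBlock2D.forward with the same additions as above:
    # optional up_block_add_samples residuals and, when return_res_samples is
    # True, a tuple of per-stage hidden states returned alongside the output.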
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
if return_res_samples:
output_states = ()
for resnet in self.resnets:
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version(">=", "1.11.0"):
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet),
hidden_states,
temb,
use_reentrant=False,
)
else:
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(resnet), hidden_states, temb
)
else:
hidden_states = resnet(hidden_states, temb, scale=scale)
if return_res_samples:
output_states = output_states + (hidden_states,)
if up_block_add_samples is not None:
hidden_states = hidden_states + up_block_add_samples.pop(
0
) # todo: add before or after
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size, scale=scale)
if return_res_samples:
output_states = output_states + (hidden_states,)
if up_block_add_samples is not None:
hidden_states = hidden_states + up_block_add_samples.pop(
0
) # todo: add before or after
if return_res_samples:
return hidden_states, output_states
else:
return hidden_states