add MobileSAM vit_t

parent 94211a4985
commit 557e28aff9
@@ -12,6 +12,7 @@ MPS_SUPPORT_MODELS = [
     "sd2",
     "paint_by_example",
     "controlnet",
+    "kandinsky2.2",
 ]

 DEFAULT_MODEL = "lama"
@@ -29,7 +30,7 @@ AVAILABLE_MODELS = [
     "sd2",
     "paint_by_example",
     "instruct_pix2pix",
-    "kandinsky2.1"
+    "kandinsky2.2",
 ]

 SD15_MODELS = ["sd1.5", "anything4", "realisticVision1.4"]

@@ -61,7 +62,7 @@ SD_CONTROLNET_CHOICES = [
     "control_v11p_sd15_canny",
     "control_v11p_sd15_openpose",
     "control_v11p_sd15_inpaint",
-    "control_v11f1p_sd15_depth"
+    "control_v11f1p_sd15_depth",
 ]

 SD_LOCAL_MODEL_HELP = """
@@ -115,7 +116,7 @@ RealESRGANModelNameList = [e.value for e in RealESRGANModelName]

 INTERACTIVE_SEG_HELP = "Enable interactive segmentation using Segment Anything."
 INTERACTIVE_SEG_MODEL_HELP = "Model size: vit_b < vit_l < vit_h. Bigger model size means better segmentation but slower speed."
-AVAILABLE_INTERACTIVE_SEG_MODELS = ["vit_b", "vit_l", "vit_h"]
+AVAILABLE_INTERACTIVE_SEG_MODELS = ["vit_b", "vit_l", "vit_h", "vit_t"]
 AVAILABLE_INTERACTIVE_SEG_DEVICES = ["cuda", "cpu", "mps"]
 REMOVE_BG_HELP = "Enable remove background. Always run on CPU"
 ANIMESEG_HELP = "Enable anime segmentation. Always run on CPU"
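
The new "vit_t" entry is what the interactive-seg model option validates against. A minimal sketch of the kind of argparse wiring these constants suggest (the actual CLI definition lives elsewhere in lama-cleaner and may differ):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--interactive-seg-model",
        default="vit_l",
        choices=AVAILABLE_INTERACTIVE_SEG_MODELS,  # now includes "vit_t"
        help=INTERACTIVE_SEG_MODEL_HELP,
    )
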
@@ -22,6 +22,10 @@ SEGMENT_ANYTHING_MODELS = {
         "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
         "md5": "4b8939a88964f0f4ff5f5b2642c598a6",
     },
+    "vit_t": {
+        "url": "https://github.com/Sanster/models/releases/download/MobileSAM/mobile_sam.pt",
+        "md5": "f3c0d8cda613564d499310dab6c812cd",
+    },
 }

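
With "vit_t" registered, the MobileSAM checkpoint can be fetched and integrity-checked like any other entry. A minimal sketch, assuming a plain urllib + hashlib helper and that SEGMENT_ANYTHING_MODELS is in scope (lama-cleaner's own download helper may differ):

    import hashlib
    from pathlib import Path
    from urllib.request import urlopen

    def fetch_checkpoint(name: str, dest_dir: str = "models") -> Path:
        entry = SEGMENT_ANYTHING_MODELS[name]  # e.g. name="vit_t"
        dest = Path(dest_dir) / Path(entry["url"]).name
        dest.parent.mkdir(parents=True, exist_ok=True)
        if not dest.exists():
            with urlopen(entry["url"]) as r:
                dest.write_bytes(r.read())
        # Verify the download against the published md5.
        if hashlib.md5(dest.read_bytes()).hexdigest() != entry["md5"]:
            raise ValueError(f"md5 mismatch for {name}")
        return dest
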
@@ -8,7 +8,15 @@ import torch

 from functools import partial

-from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
+from lama_cleaner.plugins.segment_anything.modeling.tiny_vit_sam import TinyViT
+
+from .modeling import (
+    ImageEncoderViT,
+    MaskDecoder,
+    PromptEncoder,
+    Sam,
+    TwoWayTransformer,
+)


 def build_sam_vit_h(checkpoint=None):
@@ -44,11 +52,64 @@ def build_sam_vit_b(checkpoint=None):
     )


+def build_sam_vit_t(checkpoint=None):
+    prompt_embed_dim = 256
+    image_size = 1024
+    vit_patch_size = 16
+    image_embedding_size = image_size // vit_patch_size
+    mobile_sam = Sam(
+        image_encoder=TinyViT(
+            img_size=1024,
+            in_chans=3,
+            num_classes=1000,
+            embed_dims=[64, 128, 160, 320],
+            depths=[2, 2, 6, 2],
+            num_heads=[2, 4, 5, 10],
+            window_sizes=[7, 7, 14, 7],
+            mlp_ratio=4.0,
+            drop_rate=0.0,
+            drop_path_rate=0.0,
+            use_checkpoint=False,
+            mbconv_expand_ratio=4.0,
+            local_conv_size=3,
+            layer_lr_decay=0.8,
+        ),
+        prompt_encoder=PromptEncoder(
+            embed_dim=prompt_embed_dim,
+            image_embedding_size=(image_embedding_size, image_embedding_size),
+            input_image_size=(image_size, image_size),
+            mask_in_chans=16,
+        ),
+        mask_decoder=MaskDecoder(
+            num_multimask_outputs=3,
+            transformer=TwoWayTransformer(
+                depth=2,
+                embedding_dim=prompt_embed_dim,
+                mlp_dim=2048,
+                num_heads=8,
+            ),
+            transformer_dim=prompt_embed_dim,
+            iou_head_depth=3,
+            iou_head_hidden_dim=256,
+        ),
+        pixel_mean=[123.675, 116.28, 103.53],
+        pixel_std=[58.395, 57.12, 57.375],
+    )
+
+    mobile_sam.eval()
+    if checkpoint is not None:
+        with open(checkpoint, "rb") as f:
+            state_dict = torch.load(f)
+        mobile_sam.load_state_dict(state_dict)
+    return mobile_sam
+
+
 sam_model_registry = {
     "default": build_sam,
     "vit_h": build_sam,
     "vit_l": build_sam_vit_l,
     "vit_b": build_sam_vit_b,
+    "vit_t": build_sam_vit_t,
 }

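
Once registered, MobileSAM is built like the other SAM variants. A minimal usage sketch (the import path is assumed from the TinyViT import above; the checkpoint path points at a downloaded mobile_sam.pt):

    from lama_cleaner.plugins.segment_anything.build_sam import sam_model_registry

    sam = sam_model_registry["vit_t"](checkpoint="mobile_sam.pt")
    # TinyViT keeps SAM's interface: a 1024x1024 input produces the usual
    # (B, 256, 64, 64) image embedding for the prompt encoder / mask decoder.
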
lama_cleaner/plugins/segment_anything/modeling/tiny_vit_sam.py (new file, 822 lines)
@@ -0,0 +1,822 @@
# --------------------------------------------------------
# TinyViT Model Architecture
# Copyright (c) 2022 Microsoft
# Adapted from LeViT and Swin Transformer
#   LeViT: (https://github.com/facebookresearch/levit)
#   Swin: (https://github.com/microsoft/swin-transformer)
# Build the TinyViT Model
# --------------------------------------------------------

import collections
import itertools
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from typing import Tuple


def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
            return x
        return tuple(itertools.repeat(x, n))

    return parse


to_2tuple = _ntuple(2)


def _trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    # Values are generated by using a truncated uniform distribution and
    # then using the inverse CDF for the normal distribution.
    # Get upper and lower cdf values
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Uniformly fill tensor with values from [l, u], then translate to
    # [2l-1, 2u-1].
    tensor.uniform_(2 * l - 1, 2 * u - 1)

    # Use inverse cdf transform for normal distribution to get truncated
    # standard normal
    tensor.erfinv_()

    # Transform to proper mean, std
    tensor.mul_(std * math.sqrt(2.0))
    tensor.add_(mean)

    # Clamp to ensure it's in the proper range
    tensor.clamp_(min=a, max=b)
    return tensor


def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
    applied while sampling the normal with mean/std applied, therefore a, b args
    should be adjusted to match the range of mean, std args.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    with torch.no_grad():
        return _trunc_normal_(tensor, mean, std, a, b)


def drop_path(
    x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True
):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
    'survival rate' as the argument.
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (
        x.ndim - 1
    )  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        random_tensor.div_(keep_prob)
    return x * random_tensor


class TimmDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
        super(TimmDropPath, self).__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

    def extra_repr(self):
        return f"drop_prob={round(self.drop_prob,3):0.3f}"

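
# Sketch of the stochastic-depth behaviour above (assumes a standalone
# script; not part of the committed file): with drop_prob=0.5 roughly half
# the samples in a batch are zeroed and the survivors are scaled by
# 1 / keep_prob, so the expected activation magnitude is unchanged.
#
#     x = torch.ones(4, 3)
#     out = drop_path(x, drop_prob=0.5, training=True)
#     # out rows are either all 0.0 (dropped) or all 2.0 (kept, rescaled)
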

class Conv2d_BN(torch.nn.Sequential):
    def __init__(
        self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1
    ):
        super().__init__()
        self.add_module(
            "c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)
        )
        bn = torch.nn.BatchNorm2d(b)
        torch.nn.init.constant_(bn.weight, bn_weight_init)
        torch.nn.init.constant_(bn.bias, 0)
        self.add_module("bn", bn)

    @torch.no_grad()
    def fuse(self):
        c, bn = self._modules.values()
        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = c.weight * w[:, None, None, None]
        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
        m = torch.nn.Conv2d(
            w.size(1) * self.c.groups,
            w.size(0),
            w.shape[2:],
            stride=self.c.stride,
            padding=self.c.padding,
            dilation=self.c.dilation,
            groups=self.c.groups,
        )
        m.weight.data.copy_(w)
        m.bias.data.copy_(b)
        return m

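
# A minimal sanity check for Conv2d_BN.fuse() (sketch, assumes a standalone
# script; not part of the committed file): in eval mode the fused conv should
# reproduce conv+BN, since fuse() folds the BatchNorm running statistics into
# the conv weight and bias.
#
#     m = Conv2d_BN(8, 16, ks=3, pad=1).eval()
#     x = torch.randn(1, 8, 32, 32)
#     assert torch.allclose(m(x), m.fuse()(x), atol=1e-5)
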

class DropPath(TimmDropPath):
    def __init__(self, drop_prob=None):
        super().__init__(drop_prob=drop_prob)
        self.drop_prob = drop_prob

    def __repr__(self):
        msg = super().__repr__()
        msg += f"(drop_prob={self.drop_prob})"
        return msg


class PatchEmbed(nn.Module):
    def __init__(self, in_chans, embed_dim, resolution, activation):
        super().__init__()
        img_size: Tuple[int, int] = to_2tuple(resolution)
        self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        n = embed_dim
        self.seq = nn.Sequential(
            Conv2d_BN(in_chans, n // 2, 3, 2, 1),
            activation(),
            Conv2d_BN(n // 2, n, 3, 2, 1),
        )

    def forward(self, x):
        return self.seq(x)


class MBConv(nn.Module):
    def __init__(self, in_chans, out_chans, expand_ratio, activation, drop_path):
        super().__init__()
        self.in_chans = in_chans
        self.hidden_chans = int(in_chans * expand_ratio)
        self.out_chans = out_chans

        self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1)
        self.act1 = activation()

        self.conv2 = Conv2d_BN(
            self.hidden_chans,
            self.hidden_chans,
            ks=3,
            stride=1,
            pad=1,
            groups=self.hidden_chans,
        )
        self.act2 = activation()

        self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0)
        self.act3 = activation()

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        shortcut = x

        x = self.conv1(x)
        x = self.act1(x)

        x = self.conv2(x)
        x = self.act2(x)

        x = self.conv3(x)

        x = self.drop_path(x)

        x += shortcut
        x = self.act3(x)

        return x


class PatchMerging(nn.Module):
    def __init__(self, input_resolution, dim, out_dim, activation):
        super().__init__()

        self.input_resolution = input_resolution
        self.dim = dim
        self.out_dim = out_dim
        self.act = activation()
        self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
        stride_c = 2
        if out_dim == 320 or out_dim == 448 or out_dim == 576:
            stride_c = 1
        self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
        self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)

    def forward(self, x):
        if x.ndim == 3:
            H, W = self.input_resolution
            B = len(x)
            # (B, C, H, W)
            x = x.view(B, H, W, -1).permute(0, 3, 1, 2)

        x = self.conv1(x)
        x = self.act(x)

        x = self.conv2(x)
        x = self.act(x)
        x = self.conv3(x)
        x = x.flatten(2).transpose(1, 2)
        return x


class ConvLayer(nn.Module):
    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        activation,
        drop_path=0.0,
        downsample=None,
        use_checkpoint=False,
        out_dim=None,
        conv_expand_ratio=4.0,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList(
            [
                MBConv(
                    dim,
                    dim,
                    conv_expand_ratio,
                    activation,
                    drop_path[i] if isinstance(drop_path, list) else drop_path,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, out_dim=out_dim, activation=activation
            )
        else:
            self.downsample = None

    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x


class Mlp(nn.Module):
    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.norm = nn.LayerNorm(in_features)
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.act = act_layer()
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.norm(x)

        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(torch.nn.Module):
    def __init__(
        self,
        dim,
        key_dim,
        num_heads=8,
        attn_ratio=4,
        resolution=(14, 14),
    ):
        super().__init__()
        # (h, w)
        assert isinstance(resolution, tuple) and len(resolution) == 2
        self.num_heads = num_heads
        self.scale = key_dim**-0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * num_heads
        self.attn_ratio = attn_ratio
        h = self.dh + nh_kd * 2

        self.norm = nn.LayerNorm(dim)
        self.qkv = nn.Linear(dim, h)
        self.proj = nn.Linear(self.dh, dim)

        points = list(itertools.product(range(resolution[0]), range(resolution[1])))
        N = len(points)
        attention_offsets = {}
        idxs = []
        for p1 in points:
            for p2 in points:
                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = torch.nn.Parameter(
            torch.zeros(num_heads, len(attention_offsets))
        )
        self.register_buffer(
            "attention_bias_idxs", torch.LongTensor(idxs).view(N, N), persistent=False
        )

    @torch.no_grad()
    def train(self, mode=True):
        super().train(mode)
        if mode and hasattr(self, "ab"):
            del self.ab
        else:
            self.register_buffer(
                "ab",
                self.attention_biases[:, self.attention_bias_idxs],
                persistent=False,
            )

    def forward(self, x):  # x (B,N,C)
        B, N, _ = x.shape

        # Normalization
        x = self.norm(x)

        qkv = self.qkv(x)
        # (B, N, num_heads, d)
        q, k, v = qkv.view(B, N, self.num_heads, -1).split(
            [self.key_dim, self.key_dim, self.d], dim=3
        )
        # (B, num_heads, N, d)
        q = q.permute(0, 2, 1, 3)
        k = k.permute(0, 2, 1, 3)
        v = v.permute(0, 2, 1, 3)

        attn = (q @ k.transpose(-2, -1)) * self.scale + (
            self.attention_biases[:, self.attention_bias_idxs]
            if self.training
            else self.ab
        )
        attn = attn.softmax(dim=-1)
        x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
        x = self.proj(x)
        return x

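
# Note on the attention biases above (sketch, not part of the committed
# file): each (query, key) pair maps to a learned bias indexed by the
# absolute offset (|dx|, |dy|) between the two positions, i.e. a LeViT-style
# translation-invariant relative position bias. For a 7x7 window there are
# only 7 * 7 = 49 distinct offsets, so attention_biases has shape
# (num_heads, 49) while attention_bias_idxs expands it to the full
# (49, 49) attention map.
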

class TinyViTBlock(nn.Module):
    r"""TinyViT Block.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int, int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        local_conv_size (int): the kernel size of the convolution between
            Attention and MLP. Default: 3
        activation: the activation function. Default: nn.GELU
    """

    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=7,
        mlp_ratio=4.0,
        drop=0.0,
        drop_path=0.0,
        local_conv_size=3,
        activation=nn.GELU,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        assert window_size > 0, "window_size must be greater than 0"
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        assert dim % num_heads == 0, "dim must be divisible by num_heads"
        head_dim = dim // num_heads

        window_resolution = (window_size, window_size)
        self.attn = Attention(
            dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution
        )

        mlp_hidden_dim = int(dim * mlp_ratio)
        mlp_activation = activation
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=mlp_activation,
            drop=drop,
        )

        pad = local_conv_size // 2
        self.local_conv = Conv2d_BN(
            dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim
        )

    def forward(self, x):
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        res_x = x
        if H == self.window_size and W == self.window_size:
            x = self.attn(x)
        else:
            x = x.view(B, H, W, C)
            pad_b = (self.window_size - H % self.window_size) % self.window_size
            pad_r = (self.window_size - W % self.window_size) % self.window_size
            padding = pad_b > 0 or pad_r > 0

            if padding:
                x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))

            pH, pW = H + pad_b, W + pad_r
            nH = pH // self.window_size
            nW = pW // self.window_size
            # window partition
            x = (
                x.view(B, nH, self.window_size, nW, self.window_size, C)
                .transpose(2, 3)
                .reshape(B * nH * nW, self.window_size * self.window_size, C)
            )
            x = self.attn(x)
            # window reverse
            x = (
                x.view(B, nH, nW, self.window_size, self.window_size, C)
                .transpose(2, 3)
                .reshape(B, pH, pW, C)
            )

            if padding:
                x = x[:, :H, :W].contiguous()

            x = x.view(B, L, C)

        x = res_x + self.drop_path(x)

        x = x.transpose(1, 2).reshape(B, C, H, W)
        x = self.local_conv(x)
        x = x.view(B, C, L).transpose(1, 2)

        x = x + self.drop_path(self.mlp(x))
        return x

    def extra_repr(self) -> str:
        return (
            f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
            f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
        )

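
# Shape walk-through for the block above (sketch, not part of the committed
# file), assuming dim=C and input_resolution=(H, W) with a 7x7 window:
#
#     (B, H*W, C)  tokens in
#       -> view to (B, H, W, C), pad so H and W divide the window size
#       -> partition into (B*nH*nW, 7*7, C) windows, run Attention per window
#       -> reverse the partition, crop the padding, back to (B, H*W, C)
#     residual add, then a depthwise local conv over (B, C, H, W),
#     then the MLP with a second residual add.
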

class BasicLayer(nn.Module):
    """A basic TinyViT layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3
        activation: the activation function. Default: nn.GELU
        out_dim: the output dimension of the layer. Default: dim
    """

    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        drop=0.0,
        drop_path=0.0,
        downsample=None,
        use_checkpoint=False,
        local_conv_size=3,
        activation=nn.GELU,
        out_dim=None,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList(
            [
                TinyViTBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    mlp_ratio=mlp_ratio,
                    drop=drop,
                    drop_path=drop_path[i]
                    if isinstance(drop_path, list)
                    else drop_path,
                    local_conv_size=local_conv_size,
                    activation=activation,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, out_dim=out_dim, activation=activation
            )
        else:
            self.downsample = None

    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"


class LayerNorm2d(nn.Module):
    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x


class TinyViT(nn.Module):
    def __init__(
        self,
        img_size=224,
        in_chans=3,
        num_classes=1000,
        embed_dims=[96, 192, 384, 768],
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_sizes=[7, 7, 14, 7],
        mlp_ratio=4.0,
        drop_rate=0.0,
        drop_path_rate=0.1,
        use_checkpoint=False,
        mbconv_expand_ratio=4.0,
        local_conv_size=3,
        layer_lr_decay=1.0,
    ):
        super().__init__()
        self.img_size = img_size
        self.num_classes = num_classes
        self.depths = depths
        self.num_layers = len(depths)
        self.mlp_ratio = mlp_ratio

        activation = nn.GELU

        self.patch_embed = PatchEmbed(
            in_chans=in_chans,
            embed_dim=embed_dims[0],
            resolution=img_size,
            activation=activation,
        )

        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # stochastic depth
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            kwargs = dict(
                dim=embed_dims[i_layer],
                input_resolution=(
                    patches_resolution[0]
                    // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),
                    patches_resolution[1]
                    // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),
                ),
                # input_resolution=(patches_resolution[0] // (2 ** i_layer),
                #                   patches_resolution[1] // (2 ** i_layer)),
                depth=depths[i_layer],
                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint,
                out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)],
                activation=activation,
            )
            if i_layer == 0:
                layer = ConvLayer(
                    conv_expand_ratio=mbconv_expand_ratio,
                    **kwargs,
                )
            else:
                layer = BasicLayer(
                    num_heads=num_heads[i_layer],
                    window_size=window_sizes[i_layer],
                    mlp_ratio=self.mlp_ratio,
                    drop=drop_rate,
                    local_conv_size=local_conv_size,
                    **kwargs,
                )
            self.layers.append(layer)

        # Classifier head
        self.norm_head = nn.LayerNorm(embed_dims[-1])
        self.head = (
            nn.Linear(embed_dims[-1], num_classes)
            if num_classes > 0
            else torch.nn.Identity()
        )

        # init weights
        self.apply(self._init_weights)
        self.set_layer_lr_decay(layer_lr_decay)
        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dims[-1],
                256,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(256),
            nn.Conv2d(
                256,
                256,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(256),
        )

    def set_layer_lr_decay(self, layer_lr_decay):
        decay_rate = layer_lr_decay

        # layers -> blocks (depth)
        depth = sum(self.depths)
        lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
        # print("LR SCALES:", lr_scales)

        def _set_lr_scale(m, scale):
            for p in m.parameters():
                p.lr_scale = scale

        self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
        i = 0
        for layer in self.layers:
            for block in layer.blocks:
                block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
                i += 1
            if layer.downsample is not None:
                layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1]))
        assert i == depth
        for m in [self.norm_head, self.head]:
            m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))

        for k, p in self.named_parameters():
            p.param_name = k

        def _check_lr_scale(m):
            for p in m.parameters():
                assert hasattr(p, "lr_scale"), p.param_name

        self.apply(_check_lr_scale)
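
    # How the lr_scale tags above might be consumed (sketch, not part of the
    # committed file): a fine-tuning script could build per-parameter
    # optimizer groups, e.g.
    #
    #     groups = [
    #         {"params": [p], "lr": base_lr * p.lr_scale}
    #         for p in model.parameters()
    #     ]
    #     optimizer = torch.optim.AdamW(groups, weight_decay=0.05)
    #
    # With layer_lr_decay=0.8 (as in build_sam_vit_t), earlier blocks get
    # geometrically smaller learning rates than later ones.
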
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {"attention_biases"}

    def forward_features(self, x):
        # x: (N, C, H, W)
        x = self.patch_embed(x)

        x = self.layers[0](x)
        start_i = 1

        for i in range(start_i, len(self.layers)):
            layer = self.layers[i]
            x = layer(x)
        B, _, C = x.size()
        x = x.view(B, 64, 64, C)
        x = x.permute(0, 3, 1, 2)
        x = self.neck(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        # x = self.norm_head(x)
        # x = self.head(x)
        return x