""" Vision Transformer (ViT) in PyTorch
"""
import math
import warnings

import torch
import torch.nn as nn
from einops import rearrange

from .layers import *
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models
'vit_small_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth',
),
'vit_base_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'vit_large_patch16_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
}
def compute_rollout_attention(all_layer_matrices, start_layer=0):
    # account for residual connections by adding the identity to each attention matrix
num_tokens = all_layer_matrices[0].shape[1]
batch_size = all_layer_matrices[0].shape[0]
eye = torch.eye(num_tokens).expand(batch_size, num_tokens, num_tokens).to(all_layer_matrices[0].device)
all_layer_matrices = [all_layer_matrices[i] + eye for i in range(len(all_layer_matrices))]
# all_layer_matrices = [all_layer_matrices[i] / all_layer_matrices[i].sum(dim=-1, keepdim=True)
# for i in range(len(all_layer_matrices))]
joint_attention = all_layer_matrices[start_layer]
for i in range(start_layer+1, len(all_layer_matrices)):
joint_attention = all_layer_matrices[i].bmm(joint_attention)
return joint_attention
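
# Usage sketch for compute_rollout_attention (added for illustration; the tensor sizes below are
# assumptions, not taken from a real model). Each entry is a head-averaged attention map of shape
# (batch, tokens, tokens); the function adds the identity for the residual path and multiplies the
# layer matrices together.
#
#   >>> layer_attns = [torch.rand(2, 5, 5).softmax(dim=-1) for _ in range(3)]
#   >>> rollout = compute_rollout_attention(layer_attns, start_layer=0)
#   >>> rollout.shape
#   torch.Size([2, 5, 5])
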
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = Linear(in_features, hidden_features)
self.act = GELU()
self.fc2 = Linear(hidden_features, out_features)
self.drop = Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def relprop(self, cam, **kwargs):
cam = self.drop.relprop(cam, **kwargs)
cam = self.fc2.relprop(cam, **kwargs)
cam = self.act.relprop(cam, **kwargs)
cam = self.fc1.relprop(cam, **kwargs)
return cam
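
# Shape sketch for Mlp (added for illustration; sizes are assumptions, and Linear/GELU/Dropout are
# assumed to come from .layers and to mirror their torch.nn counterparts). Both forward and relprop
# preserve the trailing feature dimension.
#
#   >>> mlp = Mlp(in_features=64, hidden_features=128)
#   >>> mlp(torch.rand(2, 5, 64)).shape
#   torch.Size([2, 5, 64])
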
class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = head_dim ** -0.5
# A = Q*K^T
self.matmul1 = einsum('bhid,bhjd->bhij')
# attn = A*V
self.matmul2 = einsum('bhij,bhjd->bhid')
self.qkv = Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = Dropout(attn_drop)
self.proj = Linear(dim, dim)
self.proj_drop = Dropout(proj_drop)
self.softmax = Softmax(dim=-1)
self.attn_cam = None
self.attn = None
self.v = None
self.v_cam = None
self.attn_gradients = None
def get_attn(self):
return self.attn
def save_attn(self, attn):
self.attn = attn
def save_attn_cam(self, cam):
self.attn_cam = cam
def get_attn_cam(self):
return self.attn_cam
def get_v(self):
return self.v
def save_v(self, v):
self.v = v
def save_v_cam(self, cam):
self.v_cam = cam
def get_v_cam(self):
return self.v_cam
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def forward(self, x):
b, n, _, h = *x.shape, self.num_heads
qkv = self.qkv(x)
q, k, v = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv=3, h=h)
self.save_v(v)
dots = self.matmul1([q, k]) * self.scale
attn = self.softmax(dots)
attn = self.attn_drop(attn)
        # Debug hook (disabled): dump successive attention maps to disk for inspection
        if False:
            from os import path
            if not path.exists('att_1.pt'):
                torch.save(attn, 'att_1.pt')
            elif not path.exists('att_2.pt'):
                torch.save(attn, 'att_2.pt')
            else:
                torch.save(attn, 'att_3.pt')
        # Save the attention map and hook its gradients for relevance propagation
        # (comment the two lines below out during training)
if x.requires_grad:
self.save_attn(attn)
attn.register_hook(self.save_attn_gradients)
out = self.matmul2([attn, v])
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.proj(out)
out = self.proj_drop(out)
return out
def relprop(self, cam, **kwargs):
cam = self.proj_drop.relprop(cam, **kwargs)
cam = self.proj.relprop(cam, **kwargs)
cam = rearrange(cam, 'b n (h d) -> b h n d', h=self.num_heads)
# attn = A*V
        (cam1, cam_v) = self.matmul2.relprop(cam, **kwargs)
cam1 /= 2
cam_v /= 2
self.save_v_cam(cam_v)
self.save_attn_cam(cam1)
cam1 = self.attn_drop.relprop(cam1, **kwargs)
cam1 = self.softmax.relprop(cam1, **kwargs)
# A = Q*K^T
(cam_q, cam_k) = self.matmul1.relprop(cam1, **kwargs)
cam_q /= 2
cam_k /= 2
cam_qkv = rearrange([cam_q, cam_k, cam_v], 'qkv b h n d -> b n (qkv h d)', qkv=3, h=self.num_heads)
return self.qkv.relprop(cam_qkv, **kwargs)
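
# Shape sketch for Attention (added for illustration; sizes are assumptions). With dim=64 and
# num_heads=8 each head operates on 8-dimensional slices; the output has the same shape as the
# input, and relprop maps a same-shaped relevance tensor back through the fused qkv projection.
#
#   >>> attn = Attention(dim=64, num_heads=8)
#   >>> x = torch.rand(2, 5, 64, requires_grad=True)
#   >>> attn(x).shape
#   torch.Size([2, 5, 64])
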
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.):
super().__init__()
self.norm1 = LayerNorm(dim, eps=1e-6)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.norm2 = LayerNorm(dim, eps=1e-6)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, drop=drop)
self.add1 = Add()
self.add2 = Add()
self.clone1 = Clone()
self.clone2 = Clone()
def forward(self, x):
x1, x2 = self.clone1(x, 2)
x = self.add1([x1, self.attn(self.norm1(x2))])
x1, x2 = self.clone2(x, 2)
x = self.add2([x1, self.mlp(self.norm2(x2))])
return x
def relprop(self, cam, **kwargs):
(cam1, cam2) = self.add2.relprop(cam, **kwargs)
cam2 = self.mlp.relprop(cam2, **kwargs)
cam2 = self.norm2.relprop(cam2, **kwargs)
cam = self.clone2.relprop((cam1, cam2), **kwargs)
(cam1, cam2) = self.add1.relprop(cam, **kwargs)
cam2 = self.attn.relprop(cam2, **kwargs)
cam2 = self.norm1.relprop(cam2, **kwargs)
cam = self.clone1.relprop((cam1, cam2), **kwargs)
return cam
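
# Block sketch (added for illustration; sizes are assumptions). forward is shape-preserving, and
# relprop retraces it in reverse: relevance is split by Add.relprop between the residual and the
# mlp/attn branch, pushed through norm2/mlp (then norm1/attn), and merged again by Clone.relprop.
#
#   >>> blk = Block(dim=64, num_heads=8, mlp_ratio=2.)
#   >>> blk(torch.rand(2, 5, 64)).shape
#   torch.Size([2, 5, 64])
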
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, num_classes=2, embed_dim=64, depth=3,
num_heads=8, mlp_ratio=2., qkv_bias=False, mlp_head=False, drop_rate=0., attn_drop_rate=0.):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate)
for i in range(depth)])
self.norm = LayerNorm(embed_dim)
if mlp_head:
            # the paper diagram suggests an 'MLP head', but it adds ~4M parameters vs. the paper
self.head = Mlp(embed_dim, int(embed_dim * mlp_ratio), num_classes)
else:
            # with a single Linear layer as head, the parameter count matches the paper to within rounding
self.head = Linear(embed_dim, num_classes)
#self.apply(self._init_weights)
self.pool = IndexSelect()
self.add = Add()
self.inp_grad = None
    def save_inp_grad(self, grad):
self.inp_grad = grad
def get_inp_grad(self):
return self.inp_grad
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@property
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward(self, x):
if x.requires_grad:
            x.register_hook(self.save_inp_grad)  # comment this out during training
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
x = self.pool(x, dim=1, indices=torch.tensor(0, device=x.device))
x = x.squeeze(1)
x = self.head(x)
return x
    def relprop(self, cam=None, method="transformer_attribution", is_ablation=False, start_layer=0, **kwargs):
# print(kwargs)
# print("conservation 1", cam.sum())
cam = self.head.relprop(cam, **kwargs)
cam = cam.unsqueeze(1)
cam = self.pool.relprop(cam, **kwargs)
cam = self.norm.relprop(cam, **kwargs)
for blk in reversed(self.blocks):
cam = blk.relprop(cam, **kwargs)
# print("conservation 2", cam.sum())
# print("min", cam.min())
if method == "full":
(cam, _) = self.add.relprop(cam, **kwargs)
cam = cam[:, 1:]
cam = self.patch_embed.relprop(cam, **kwargs)
# sum on channels
cam = cam.sum(dim=1)
return cam
elif method == "rollout":
# cam rollout
attn_cams = []
for blk in self.blocks:
attn_heads = blk.attn.get_attn_cam().clamp(min=0)
avg_heads = (attn_heads.sum(dim=1) / attn_heads.shape[1]).detach()
attn_cams.append(avg_heads)
cam = compute_rollout_attention(attn_cams, start_layer=start_layer)
cam = cam[:, 0, 1:]
return cam
        # our method; the method name "grad" is kept only for backwards compatibility
elif method == "transformer_attribution" or method == "grad":
cams = []
for blk in self.blocks:
grad = blk.attn.get_attn_gradients()
cam = blk.attn.get_attn_cam()
cam = cam[0].reshape(-1, cam.shape[-1], cam.shape[-1])
grad = grad[0].reshape(-1, grad.shape[-1], grad.shape[-1])
cam = grad * cam
cam = cam.clamp(min=0).mean(dim=0)
cams.append(cam.unsqueeze(0))
rollout = compute_rollout_attention(cams, start_layer=start_layer)
cam = rollout[:, 0, 1:]
return cam
elif method == "last_layer":
cam = self.blocks[-1].attn.get_attn_cam()
cam = cam[0].reshape(-1, cam.shape[-1], cam.shape[-1])
if is_ablation:
grad = self.blocks[-1].attn.get_attn_gradients()
grad = grad[0].reshape(-1, grad.shape[-1], grad.shape[-1])
cam = grad * cam
cam = cam.clamp(min=0).mean(dim=0)
cam = cam[0, 1:]
return cam
elif method == "last_layer_attn":
cam = self.blocks[-1].attn.get_attn()
cam = cam[0].reshape(-1, cam.shape[-1], cam.shape[-1])
cam = cam.clamp(min=0).mean(dim=0)
cam = cam[0, 1:]
return cam
elif method == "second_layer":
cam = self.blocks[1].attn.get_attn_cam()
cam = cam[0].reshape(-1, cam.shape[-1], cam.shape[-1])
if is_ablation:
grad = self.blocks[1].attn.get_attn_gradients()
grad = grad[0].reshape(-1, grad.shape[-1], grad.shape[-1])
cam = grad * cam
cam = cam.clamp(min=0).mean(dim=0)
cam = cam[0, 1:]
return cam
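
# End-to-end usage sketch (added for illustration; tensor sizes, the target class index, and the
# alpha keyword are assumptions rather than part of this file). The model consumes pre-embedded
# token sequences of shape (batch, tokens, embed_dim) whose first token plays the role of the
# class token; relprop is run after a backward pass on the target logit so that the attention
# gradients saved during forward are available. Extra kwargs (e.g. alpha=1) are simply forwarded
# to the relprop implementations in .layers.
#
#   >>> model = VisionTransformer(num_classes=2, embed_dim=64, depth=3, num_heads=8)
#   >>> tokens = torch.rand(1, 10, 64, requires_grad=True)
#   >>> logits = model(tokens)                        # (1, 2)
#   >>> one_hot = torch.zeros_like(logits)
#   >>> one_hot[0, 1] = 1.0
#   >>> (logits * one_hot).sum().backward(retain_graph=True)
#   >>> cam = model.relprop(one_hot, method="transformer_attribution", start_layer=0, alpha=1)
#   >>> cam.shape                                     # relevance per non-class token
#   torch.Size([1, 9])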