import cv2
import numpy as np
import torch
import ttach as tta
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
class BaseCAM:
def __init__(self,
model,
target_layers,
use_cuda=False,
reshape_transform=None,
compute_input_gradient=False,
uses_gradients=True):
self.model = model.eval()
self.target_layers = target_layers
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
else:
self.model = model.cpu()
self.reshape_transform = reshape_transform
self.compute_input_gradient = compute_input_gradient
self.uses_gradients = uses_gradients
self.activations_and_grads = ActivationsAndGradients(
self.model, target_layers, reshape_transform)
""" Get a vector of weights for every channel in the target layer.
Methods that return weights channels,
will typically need to only implement this function. """
def get_cam_weights(self,
input_tensor,
target_layers,
target_category,
activations,
grads):
raise Exception("Not Implemented")
    def get_objective(self, input_encoding, target_encoding):
        # Normalize both encodings so the dot product below is a cosine
        # similarity. Only the first element of each batch is compared.
        input_encoding = input_encoding / input_encoding.norm(dim=-1, keepdim=True)
        target_encoding = target_encoding / target_encoding.norm(dim=-1, keepdim=True)
        return input_encoding[0].dot(target_encoding[0])
def get_cam_image(self,
input_tensor,
target_layer,
target_category,
activations,
grads,
eigen_smooth=False):
weights = self.get_cam_weights(input_tensor, target_layer,
target_category, activations, grads)
weighted_activations = weights[:, :, None, None] * activations
if eigen_smooth:
cam = get_2d_projection(weighted_activations)
else:
cam = weighted_activations.sum(axis=1)
return cam
def forward(self, input_tensor, target_encoding, target_category=None, eigen_smooth=False):
if self.cuda:
input_tensor = input_tensor.cuda()
        if self.compute_input_gradient:
            # torch.autograd.Variable is deprecated; a detached clone with
            # requires_grad enabled is the modern equivalent.
            input_tensor = input_tensor.clone().detach().requires_grad_(True)
# output will be the image encoding
output = self.activations_and_grads(input_tensor)
if isinstance(target_category, int):
target_category = [target_category] * input_tensor.size(0)
        if target_category is None:
            # With an encoding model there are no class logits; the argmax is
            # only a fallback so get_cam_weights still receives an index.
            target_category = np.argmax(output.cpu().data.numpy(), axis=-1)
else:
            assert len(target_category) == input_tensor.size(0)
        if self.uses_gradients:
            self.model.zero_grad()
            # The objective is the cosine similarity between the image
            # encoding and the target encoding (see get_objective).
            objective = self.get_objective(output, target_encoding)
            objective.backward(retain_graph=True)
# In most of the saliency attribution papers, the saliency is
# computed with a single target layer.
# Commonly it is the last convolutional layer.
# Here we support passing a list with multiple target layers.
        # It will compute the saliency image for every target layer,
# and then aggregate them (with a default mean aggregation).
# This gives you more flexibility in case you just want to
# use all conv layers for example, all Batchnorm layers,
# or something else.
cam_per_layer = self.compute_cam_per_layer(input_tensor,
target_category,
eigen_smooth)
        # Aggregation across layers is intentionally disabled here; the
        # per-layer CAMs are returned as-is.
        # return self.aggregate_multi_layers(cam_per_layer)
        return cam_per_layer
def get_target_width_height(self, input_tensor):
width, height = input_tensor.size(-1), input_tensor.size(-2)
return width, height
def compute_cam_per_layer(
self,
input_tensor,
target_category,
eigen_smooth):
activations_list = [a.cpu().data.numpy()
for a in self.activations_and_grads.activations]
grads_list = [g.cpu().data.numpy()
for g in self.activations_and_grads.gradients]
target_size = self.get_target_width_height(input_tensor)
cam_per_target_layer = []
# Loop over the saliency image from every layer
for target_layer, layer_activations, layer_grads in \
zip(self.target_layers, activations_list, grads_list):
cam = self.get_cam_image(input_tensor,
target_layer,
target_category,
layer_activations,
layer_grads,
eigen_smooth)
            cam = np.maximum(cam, 0)
            # Min-max scaling is intentionally skipped here (scale_cam_image
            # would also resize to the input resolution); keep the raw,
            # ReLU'd CAM instead.
            # scaled = self.scale_cam_image(cam, target_size)
            scaled = cam
            cam_per_target_layer.append(scaled[:, None, :])
return cam_per_target_layer
def aggregate_multi_layers(self, cam_per_target_layer):
cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1)
cam_per_target_layer = np.maximum(cam_per_target_layer, 0)
result = np.mean(cam_per_target_layer, axis=1)
return self.scale_cam_image(result)
def scale_cam_image(self, cam, target_size=None):
result = []
for img in cam:
img = img - np.min(img)
            img = img / (1e-7 + np.max(img))  # epsilon avoids division by zero
img = np.float32(img)
if target_size is not None:
img = cv2.resize(img, target_size)
result.append(img)
result = np.float32(result)
return result
    def forward_augmentation_smoothing(self,
                                       input_tensor,
                                       target_encoding,
                                       target_category=None,
                                       eigen_smooth=False):
transforms = tta.Compose(
[
tta.HorizontalFlip(),
tta.Multiply(factors=[0.9, 1, 1.1]),
]
)
cams = []
for transform in transforms:
augmented_tensor = transform.augment_image(input_tensor)
            cam = self.forward(augmented_tensor, target_encoding,
                               target_category, eigen_smooth)
            # forward returns one CAM per target layer; aggregate them into
            # a single BxHxW map before de-augmenting.
            cam = self.aggregate_multi_layers(cam)
            # The ttach library expects a tensor of size BxCxHxW
            cam = cam[:, None, :, :]
cam = torch.from_numpy(cam)
cam = transform.deaugment_mask(cam)
# Back to numpy float32, HxW
cam = cam.numpy()
cam = cam[:, 0, :, :]
cams.append(cam)
cam = np.mean(np.float32(cams), axis=0)
return cam
def __call__(self,
input_tensor,
target_encoding,
target_category=None,
aug_smooth=False,
eigen_smooth=False):
# Smooth the CAM result with test time augmentation
if aug_smooth is True:
            return self.forward_augmentation_smoothing(
                input_tensor, target_encoding, target_category, eigen_smooth)
return self.forward(input_tensor, target_encoding,
target_category, eigen_smooth)
def __del__(self):
self.activations_and_grads.release()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.activations_and_grads.release()
if isinstance(exc_value, IndexError):
# Handle IndexError here...
print(
f"An exception occurred in CAM with block: {exc_type}. Message: {exc_value}")
return True
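

# ---------------------------------------------------------------------------
# Illustrative only, not part of the original file: a minimal sketch of a
# concrete subclass, assuming a GradCAM-style weighting scheme in which each
# channel's weight is the global average of its gradients. It exists to show
# the get_cam_weights contract; real subclasses live elsewhere in the library.
class GradCAMSketch(BaseCAM):
    def get_cam_weights(self,
                        input_tensor,
                        target_layer,
                        target_category,
                        activations,
                        grads):
        # grads arrives as a numpy array of shape (batch, channels, h, w);
        # averaging over the spatial dimensions gives one weight per channel.
        return np.mean(grads, axis=(2, 3))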
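

# A hedged usage sketch under stated assumptions: the encoder below is a
# stand-in for any torch.nn.Module that maps a BxCxHxW batch to a BxD
# encoding (e.g. a CLIP image tower whose forward() returns the encoding),
# and `text_encoding` is a placeholder 1xD target. None of these names come
# from this file.
if __name__ == "__main__":
    import torchvision.models as models

    # ResNet-18 without its classifier head acts as a 512-d image encoder.
    resnet = models.resnet18(weights=None)
    image_encoder = torch.nn.Sequential(*list(resnet.children())[:-1],
                                        torch.nn.Flatten())
    target_layers = [resnet.layer4[-1]]  # same module object as in the Sequential

    input_tensor = torch.randn(1, 3, 224, 224)
    text_encoding = torch.randn(1, 512)  # placeholder target encoding

    with GradCAMSketch(model=image_encoder, target_layers=target_layers) as cam:
        grayscale_cams = cam(input_tensor, text_encoding)
        # forward() returns one unscaled (B, 1, H', W') array per target layer.
        print([c.shape for c in grayscale_cams])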