PrarthanaTS committed
Commit 02c7167
1 Parent(s): ad0185c

Upload 7 files

Files changed (7)
  1. callbacks.py +107 -0
  2. config.py +102 -0
  3. dataset.py +226 -0
  4. loss.py +79 -0
  5. main_yolov3_lightening.py +107 -0
  6. utils.py +608 -0
  7. utils_for_app.py +255 -0
callbacks.py ADDED
@@ -0,0 +1,107 @@
import lightning.pytorch as pl
import config
from utils import (check_class_accuracy, get_evaluation_bboxes, mean_average_precision, plot_couple_examples)
from lightning.pytorch.callbacks import Callback


class PlotTestExamplesCallback(Callback):
    def __init__(self, every_n_epochs: int = 1) -> None:
        super().__init__()
        self.every_n_epochs = every_n_epochs

    def on_train_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        if (trainer.current_epoch + 1) % self.every_n_epochs == 0:
            plot_couple_examples(
                model=pl_module,
                loader=pl_module.train_dataloader(),
                thresh=0.6,
                iou_thresh=0.5,
                anchors=pl_module.scaled_anchors,
            )


class CheckClassAccuracyCallback(pl.Callback):
    def __init__(
        self, train_every_n_epochs: int = 1, test_every_n_epochs: int = 3
    ) -> None:
        super().__init__()
        self.train_every_n_epochs = train_every_n_epochs
        self.test_every_n_epochs = test_every_n_epochs

    def on_train_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        if (trainer.current_epoch + 1) % self.train_every_n_epochs == 0:
            class_acc, no_obj_acc, obj_acc = check_class_accuracy(
                model=pl_module,
                loader=pl_module.train_dataloader(),
                threshold=config.CONF_THRESHOLD,
            )
            pl_module.log_dict(
                {
                    "train_class_acc": class_acc,
                    "train_no_obj_acc": no_obj_acc,
                    "train_obj_acc": obj_acc,
                },
                logger=True,
            )
            print("Train Metrics")
            print(f"Epoch: {trainer.current_epoch}")
            print(f"Loss: {trainer.callback_metrics['train_loss_epoch']}")
            print(f"Class Accuracy: {class_acc:.2f}%")
            print(f"No Object Accuracy: {no_obj_acc:.2f}%")
            print(f"Object Accuracy: {obj_acc:.2f}%")

        if (trainer.current_epoch + 1) % self.test_every_n_epochs == 0:
            class_acc, no_obj_acc, obj_acc = check_class_accuracy(
                model=pl_module,
                loader=pl_module.test_dataloader(),
                threshold=config.CONF_THRESHOLD,
            )
            pl_module.log_dict(
                {
                    "test_class_acc": class_acc,
                    "test_no_obj_acc": no_obj_acc,
                    "test_obj_acc": obj_acc,
                },
                logger=True,
            )

            print("Test Metrics")
            print(f"Class Accuracy: {class_acc:.2f}%")
            print(f"No Object Accuracy: {no_obj_acc:.2f}%")
            print(f"Object Accuracy: {obj_acc:.2f}%")


class MAPCallback(pl.Callback):
    def __init__(self, every_n_epochs: int = 3) -> None:
        super().__init__()
        self.every_n_epochs = every_n_epochs

    def on_train_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        if (trainer.current_epoch + 1) % self.every_n_epochs == 0:
            pred_boxes, true_boxes = get_evaluation_bboxes(
                loader=pl_module.test_dataloader(),
                model=pl_module,
                iou_threshold=config.NMS_IOU_THRESH,
                anchors=config.ANCHORS,
                threshold=config.CONF_THRESHOLD,
                device=config.DEVICE,
            )

            map_val = mean_average_precision(
                pred_boxes=pred_boxes,
                true_boxes=true_boxes,
                iou_threshold=config.MAP_IOU_THRESH,
                box_format="midpoint",
                num_classes=config.NUM_CLASSES,
            )
            print("MAP: ", map_val.item())
            pl_module.log(
                "MAP",
                map_val.item(),
                logger=True,
            )
            pl_module.train()
config.py ADDED
@@ -0,0 +1,102 @@
import albumentations as A
import cv2
import torch

from albumentations.pytorch import ToTensorV2
# from utils import seed_everything

DATASET = 'PASCAL_VOC'
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# seed_everything()  # If you want deterministic behavior
NUM_WORKERS = 4
BATCH_SIZE = 16
IMAGE_SIZE = 416
NUM_CLASSES = 20
LEARNING_RATE = 1e-5
WEIGHT_DECAY = 1e-4
NUM_EPOCHS = 100
CONF_THRESHOLD = 0.05
MAP_IOU_THRESH = 0.5
NMS_IOU_THRESH = 0.45
S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
PIN_MEMORY = True
LOAD_MODEL = False
SAVE_MODEL = True
CHECKPOINT_FILE = "checkpoint.pth.tar"
IMG_DIR = DATASET + "/images/"
LABEL_DIR = DATASET + "/labels/"

ANCHORS = [
    [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
    [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
    [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
]  # Note these have been rescaled to be between [0, 1]

means = [0.485, 0.456, 0.406]

scale = 1.1
train_transforms = A.Compose(
    [
        A.LongestMaxSize(max_size=int(IMAGE_SIZE * scale)),
        A.PadIfNeeded(
            min_height=int(IMAGE_SIZE * scale),
            min_width=int(IMAGE_SIZE * scale),
            border_mode=cv2.BORDER_CONSTANT,
        ),
        A.Rotate(limit=10, interpolation=1, border_mode=4),
        A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
        A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),
        A.OneOf(
            [
                A.ShiftScaleRotate(
                    rotate_limit=20, p=0.5, border_mode=cv2.BORDER_CONSTANT
                ),
                # A.Affine(shear=15, p=0.5, mode="constant"),
            ],
            p=1.0,
        ),
        A.HorizontalFlip(p=0.5),
        A.Blur(p=0.1),
        A.CLAHE(p=0.1),
        A.Posterize(p=0.1),
        A.ToGray(p=0.1),
        A.ChannelShuffle(p=0.05),
        A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
        ToTensorV2(),
    ],
    bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
)
test_transforms = A.Compose(
    [
        A.LongestMaxSize(max_size=IMAGE_SIZE),
        A.PadIfNeeded(
            min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
        ),
        A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
        ToTensorV2(),
    ],
    bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[]),
)

PASCAL_CLASSES = [
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]
dataset.py ADDED
@@ -0,0 +1,226 @@
"""
Creates a Pytorch dataset to load the Pascal VOC & MS COCO datasets
"""

import config
import numpy as np
import os
import pandas as pd
import torch
from utils import xywhn2xyxy, xyxy2xywhn
import random

from PIL import Image, ImageFile
from torch.utils.data import Dataset, DataLoader
from utils import (
    cells_to_bboxes,
    iou_width_height as iou,
    non_max_suppression as nms,
    plot_image
)

ImageFile.LOAD_TRUNCATED_IMAGES = True


class YOLOTrainDataset(Dataset):
    def __init__(
        self,
        csv_file,
        img_dir,
        label_dir,
        anchors,
        image_size=416,
        S=[13, 26, 52],
        C=20,
        transform=None,
    ):
        self.annotations = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.label_dir = label_dir
        self.image_size = image_size
        self.mosaic_border = [image_size // 2, image_size // 2]
        self.transform = transform
        self.S = S
        self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2])  # for all 3 scales
        self.num_anchors = self.anchors.shape[0]
        self.num_anchors_per_scale = self.num_anchors // 3
        self.C = C
        self.ignore_iou_thresh = 0.5
        self.counter = 0

    def __len__(self):
        return len(self.annotations)

    def load_mosaic(self, index):
        # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
        labels4 = []
        s = self.image_size
        yc, xc = (int(random.uniform(x, 2 * s - x)) for x in self.mosaic_border)  # mosaic center x, y
        indices = [index] + random.choices(range(len(self)), k=3)  # 3 additional image indices
        random.shuffle(indices)
        for i, index in enumerate(indices):
            # Load image
            label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
            bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
            img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
            img = np.array(Image.open(img_path).convert("RGB"))

            h, w = img.shape[0], img.shape[1]
            labels = np.array(bboxes)

            # place img in img4
            if i == 0:  # top left
                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
            padw = x1a - x1b
            padh = y1a - y1b

            # Labels
            if labels.size:
                labels[:, :-1] = xywhn2xyxy(labels[:, :-1], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            labels4.append(labels)

        # Concat/clip labels
        labels4 = np.concatenate(labels4, 0)
        for x in (labels4[:, :-1],):
            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
        # img4, labels4 = replicate(img4, labels4)  # replicate
        labels4[:, :-1] = xyxy2xywhn(labels4[:, :-1], 2 * s, 2 * s)
        labels4[:, :-1] = np.clip(labels4[:, :-1], 0, 1)
        labels4 = labels4[labels4[:, 2] > 0]
        labels4 = labels4[labels4[:, 3] > 0]
        return img4, labels4

    def __getitem__(self, index):

        # 75% probability to apply mosaic
        self.counter = (self.counter + 1) % 4
        if self.counter != 0:
            image, bboxes = self.load_mosaic(index)
        # Else, load normally without mosaic
        else:
            # Load image and bbox
            label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
            bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
            img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
            image = np.array(Image.open(img_path).convert("RGB"))

        if self.transform:
            augmentations = self.transform(image=image, bboxes=bboxes)
            image = augmentations["image"]
            bboxes = augmentations["bboxes"]

        # Below assumes 3 scale predictions (as paper) and same num of anchors per scale
        targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S]
        for box in bboxes:
            iou_anchors = iou(torch.tensor(box[2:4]), self.anchors)
            anchor_indices = iou_anchors.argsort(descending=True, dim=0)
            x, y, width, height, class_label = box
            has_anchor = [False] * 3  # each scale should have one anchor
            for anchor_idx in anchor_indices:
                scale_idx = anchor_idx // self.num_anchors_per_scale
                anchor_on_scale = anchor_idx % self.num_anchors_per_scale
                S = self.S[scale_idx]
                i, j = int(S * y), int(S * x)  # which cell
                anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
                if not anchor_taken and not has_anchor[scale_idx]:
                    targets[scale_idx][anchor_on_scale, i, j, 0] = 1
                    x_cell, y_cell = S * x - j, S * y - i  # both between [0,1]
                    width_cell, height_cell = (
                        width * S,
                        height * S,
                    )  # can be greater than 1 since it's relative to cell
                    box_coordinates = torch.tensor(
                        [x_cell, y_cell, width_cell, height_cell]
                    )
                    targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
                    targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
                    has_anchor[scale_idx] = True

                elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
                    targets[scale_idx][anchor_on_scale, i, j, 0] = -1  # ignore prediction

        return image, tuple(targets)


class YOLOTestDataset(Dataset):
    def __init__(
        self,
        csv_file,
        img_dir,
        label_dir,
        anchors,
        image_size=416,
        S=[13, 26, 52],
        C=20,
        transform=None,
    ):
        self.annotations = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.label_dir = label_dir
        self.image_size = image_size
        self.transform = transform
        self.S = S
        self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2])  # for all 3 scales
        self.num_anchors = self.anchors.shape[0]
        self.num_anchors_per_scale = self.num_anchors // 3
        self.C = C
        self.ignore_iou_thresh = 0.5

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
        bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
        img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
        image = np.array(Image.open(img_path).convert("RGB"))

        if self.transform:
            augmentations = self.transform(image=image, bboxes=bboxes)
            image = augmentations["image"]
            bboxes = augmentations["bboxes"]

        # Below assumes 3 scale predictions (as paper) and same num of anchors per scale
        targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S]
        for box in bboxes:
            iou_anchors = iou(torch.tensor(box[2:4]), self.anchors)
            anchor_indices = iou_anchors.argsort(descending=True, dim=0)
            x, y, width, height, class_label = box
            has_anchor = [False] * 3  # each scale should have one anchor
            for anchor_idx in anchor_indices:
                scale_idx = anchor_idx // self.num_anchors_per_scale
                anchor_on_scale = anchor_idx % self.num_anchors_per_scale
                S = self.S[scale_idx]
                i, j = int(S * y), int(S * x)  # which cell
                anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
                if not anchor_taken and not has_anchor[scale_idx]:
                    targets[scale_idx][anchor_on_scale, i, j, 0] = 1
                    x_cell, y_cell = S * x - j, S * y - i  # both between [0,1]
                    width_cell, height_cell = (
                        width * S,
                        height * S,
                    )  # can be greater than 1 since it's relative to cell
                    box_coordinates = torch.tensor(
                        [x_cell, y_cell, width_cell, height_cell]
                    )
                    targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
                    targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
                    has_anchor[scale_idx] = True

                elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
                    targets[scale_idx][anchor_on_scale, i, j, 0] = -1  # ignore prediction

        return image, tuple(targets)
loss.py ADDED
@@ -0,0 +1,79 @@
"""
Implementation of Yolo Loss Function similar to the one in Yolov3 paper,
the difference from what I can tell is I use CrossEntropy for the classes
instead of BinaryCrossEntropy.
"""
import random
import torch
import torch.nn as nn
import lightning.pytorch as pl
from utils import intersection_over_union


class YoloLoss(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()
        self.bce = nn.BCEWithLogitsLoss()
        self.entropy = nn.CrossEntropyLoss()
        self.sigmoid = nn.Sigmoid()

        # Constants signifying how much to pay for each respective part of the loss
        self.lambda_class = 1
        self.lambda_noobj = 10
        self.lambda_obj = 1
        self.lambda_box = 10

    def forward(self, predictions, target, anchors):
        # Check where obj and noobj (we ignore if target == -1)
        obj = target[..., 0] == 1  # in paper this is Iobj_i
        noobj = target[..., 0] == 0  # in paper this is Inoobj_i

        # ======================= #
        #   FOR NO OBJECT LOSS    #
        # ======================= #

        no_object_loss = self.bce(
            (predictions[..., 0:1][noobj]), (target[..., 0:1][noobj]),
        )

        # ==================== #
        #   FOR OBJECT LOSS    #
        # ==================== #

        anchors = anchors.reshape(1, 3, 1, 1, 2)
        box_preds = torch.cat([self.sigmoid(predictions[..., 1:3]), torch.exp(predictions[..., 3:5]) * anchors], dim=-1)
        ious = intersection_over_union(box_preds[obj], target[..., 1:5][obj]).detach()
        object_loss = self.mse(self.sigmoid(predictions[..., 0:1][obj]), ious * target[..., 0:1][obj])

        # ======================== #
        #   FOR BOX COORDINATES    #
        # ======================== #

        predictions[..., 1:3] = self.sigmoid(predictions[..., 1:3])  # x,y coordinates
        target[..., 3:5] = torch.log(
            (1e-16 + target[..., 3:5] / anchors)
        )  # width, height coordinates
        box_loss = self.mse(predictions[..., 1:5][obj], target[..., 1:5][obj])

        # ================== #
        #   FOR CLASS LOSS   #
        # ================== #

        class_loss = self.entropy(
            (predictions[..., 5:][obj]), (target[..., 5][obj].long()),
        )

        # print("__________________________________")
        # print(self.lambda_box * box_loss)
        # print(self.lambda_obj * object_loss)
        # print(self.lambda_noobj * no_object_loss)
        # print(self.lambda_class * class_loss)
        # print("\n")

        return (
            self.lambda_box * box_loss
            + self.lambda_obj * object_loss
            + self.lambda_noobj * no_object_loss
            + self.lambda_class * class_loss
        )
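Shape sketch (hypothetical, not part of this upload): a minimal smoke test of what YoloLoss expects at a single scale, with predictions of shape (N, 3, S, S, 5 + num_classes) and targets of shape (N, 3, S, S, 6) as built in dataset.py; the random values, the chosen cell, and the class index are illustrative only.

import torch
from loss import YoloLoss

# Hypothetical smoke test: batch of 2, 13x13 grid, 20 classes.
loss_fn = YoloLoss()
predictions = torch.randn(2, 3, 13, 13, 25)   # (N, 3, S, S, 5 + num_classes)
target = torch.zeros(2, 3, 13, 13, 6)         # [objectness, x, y, w, h, class] per anchor cell
target[0, 0, 6, 6] = torch.tensor([1.0, 0.5, 0.5, 2.0, 2.0, 7.0])  # one labelled cell (class 7)
scaled_anchors = torch.tensor(
    [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)]
) * 13                                        # anchors for this scale, multiplied by S
print(loss_fn(predictions, target, scaled_anchors))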
main_yolov3_lightening.py ADDED
@@ -0,0 +1,107 @@
import torch
import torch.optim as optim
import lightning.pytorch as pl

from yolov3 import YOLOv3
from loss import YoloLoss
from utils import get_loaders, load_checkpoint, check_class_accuracy, intersection_over_union
import config
from torch.optim.lr_scheduler import OneCycleLR


class YOLOv3Lightning(pl.LightningModule):
    def __init__(self, lr_value=0):
        super().__init__()
        self.config = config
        self.model = YOLOv3(num_classes=self.config.NUM_CLASSES)
        self.loss_fn = YoloLoss()
        if lr_value == 0:
            self.learning_rate = self.config.LEARNING_RATE
        else:
            self.learning_rate = lr_value

    def forward(self, x):
        return self.model(x)

    def configure_optimizers(self):
        optimizer = optim.Adam(self.model.parameters(), lr=self.config.LEARNING_RATE, weight_decay=self.config.WEIGHT_DECAY)
        EPOCHS = self.config.NUM_EPOCHS * 2 // 5
        scheduler = OneCycleLR(
            optimizer,
            max_lr=1E-3,
            steps_per_epoch=len(self.train_dataloader()),
            epochs=EPOCHS,
            pct_start=5 / EPOCHS,
            div_factor=100,
            three_phase=False,
            final_div_factor=100,
            anneal_strategy='linear',
        )
        return [optimizer], [{"scheduler": scheduler, "interval": "step", "frequency": 1}]

    def get_all_loaders(self):
        train_loader, test_loader, val_loader = get_loaders(
            train_csv_path=self.config.DATASET + "/train.csv",
            test_csv_path=self.config.DATASET + "/test.csv",
        )
        return train_loader, test_loader, val_loader

    def train_dataloader(self):
        train_loader, _, _ = self.get_all_loaders()
        return train_loader

    def training_step(self, batch, batch_idx):
        x, y = batch
        y0, y1, y2 = (y[0].to(self.device), y[1].to(self.device), y[2].to(self.device))
        out = self(x)

        loss = (
            self.loss_fn(out[0], y0, self.scaled_anchors[0])
            + self.loss_fn(out[1], y1, self.scaled_anchors[1])
            + self.loss_fn(out[2], y2, self.scaled_anchors[2])
        )

        self.log('train_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        return loss

    def val_dataloader(self):
        # Lightning looks up the hook named `val_dataloader` for validation
        _, _, val_loader = self.get_all_loaders()
        return val_loader

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y0, y1, y2 = (
            y[0].to(self.device),
            y[1].to(self.device),
            y[2].to(self.device),
        )
        out = self(x)
        loss = (
            self.loss_fn(out[0], y0, self.scaled_anchors[0])
            + self.loss_fn(out[1], y1, self.scaled_anchors[1])
            + self.loss_fn(out[2], y2, self.scaled_anchors[2])
        )

        self.log('val_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)

    def test_dataloader(self):
        _, test_loader, _ = self.get_all_loaders()
        return test_loader

    def test_step(self, batch, batch_idx):
        x, y = batch
        y0, y1, y2 = (
            y[0].to(self.device),
            y[1].to(self.device),
            y[2].to(self.device),
        )
        out = self(x)
        loss = (
            self.loss_fn(out[0], y0, self.scaled_anchors[0])
            + self.loss_fn(out[1], y1, self.scaled_anchors[1])
            + self.loss_fn(out[2], y2, self.scaled_anchors[2])
        )
        self.log('test_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)

    def on_train_start(self):
        if self.config.LOAD_MODEL:
            load_checkpoint(self.config.CHECKPOINT_FILE, self.model, self.optimizers(), self.config.LEARNING_RATE)
        self.scaled_anchors = (
            torch.tensor(self.config.ANCHORS)
            * torch.tensor(self.config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
        ).to(self.device)
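Usage sketch (hypothetical, not part of this upload): one way the module above might be wired to a Trainer together with the callbacks from callbacks.py. It assumes the yolov3 module imported above provides the YOLOv3 model, and the Trainer arguments and callback intervals are illustrative only.

import lightning.pytorch as pl
import config
from main_yolov3_lightening import YOLOv3Lightning
from callbacks import PlotTestExamplesCallback, CheckClassAccuracyCallback, MAPCallback

if __name__ == "__main__":
    model = YOLOv3Lightning()
    trainer = pl.Trainer(
        max_epochs=config.NUM_EPOCHS,   # illustrative; the actual schedule is not part of this upload
        accelerator="auto",
        callbacks=[
            PlotTestExamplesCallback(every_n_epochs=5),
            CheckClassAccuracyCallback(train_every_n_epochs=1, test_every_n_epochs=3),
            MAPCallback(every_n_epochs=5),
        ],
    )
    trainer.fit(model)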
utils.py ADDED
@@ -0,0 +1,608 @@
import config
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import os
import random
import torch

from collections import Counter
from torch.utils.data import DataLoader
# from tqdm import tqdm
from tqdm.notebook import tqdm
from lightning.pytorch.tuner import Tuner


def iou_width_height(boxes1, boxes2):
    """
    Parameters:
        boxes1 (tensor): width and height of the first bounding boxes
        boxes2 (tensor): width and height of the second bounding boxes
    Returns:
        tensor: Intersection over union of the corresponding boxes
    """
    intersection = torch.min(boxes1[..., 0], boxes2[..., 0]) * torch.min(
        boxes1[..., 1], boxes2[..., 1]
    )
    union = (
        boxes1[..., 0] * boxes1[..., 1] + boxes2[..., 0] * boxes2[..., 1] - intersection
    )
    return intersection / union


def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
    """
    Video explanation of this function:
    https://youtu.be/XXYG5ZWtjj0

    This function calculates intersection over union (iou) given pred boxes
    and target boxes.

    Parameters:
        boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
        boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
        box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)

    Returns:
        tensor: Intersection over union for all examples
    """

    if box_format == "midpoint":
        box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
        box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
        box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
        box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
        box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
        box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
        box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
        box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2

    if box_format == "corners":
        box1_x1 = boxes_preds[..., 0:1]
        box1_y1 = boxes_preds[..., 1:2]
        box1_x2 = boxes_preds[..., 2:3]
        box1_y2 = boxes_preds[..., 3:4]
        box2_x1 = boxes_labels[..., 0:1]
        box2_y1 = boxes_labels[..., 1:2]
        box2_x2 = boxes_labels[..., 2:3]
        box2_y2 = boxes_labels[..., 3:4]

    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)

    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))

    return intersection / (box1_area + box2_area - intersection + 1e-6)


def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
    """
    Video explanation of this function:
    https://youtu.be/YDkjWEN8jNA

    Does Non Max Suppression given bboxes

    Parameters:
        bboxes (list): list of lists containing all bboxes with each bboxes
        specified as [class_pred, prob_score, x1, y1, x2, y2]
        iou_threshold (float): threshold where predicted bboxes is correct
        threshold (float): threshold to remove predicted bboxes (independent of IoU)
        box_format (str): "midpoint" or "corners" used to specify bboxes

    Returns:
        list: bboxes after performing NMS given a specific IoU threshold
    """

    assert type(bboxes) == list

    bboxes = [box for box in bboxes if box[1] > threshold]
    bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
    bboxes_after_nms = []

    while bboxes:
        chosen_box = bboxes.pop(0)

        bboxes = [
            box
            for box in bboxes
            if box[0] != chosen_box[0]
            or intersection_over_union(
                torch.tensor(chosen_box[2:]),
                torch.tensor(box[2:]),
                box_format=box_format,
            )
            < iou_threshold
        ]

        bboxes_after_nms.append(chosen_box)

    return bboxes_after_nms


def mean_average_precision(
    pred_boxes, true_boxes, iou_threshold=0.5, box_format="midpoint", num_classes=20
):
    """
    Video explanation of this function:
    https://youtu.be/FppOzcDvaDI

    This function calculates mean average precision (mAP)

    Parameters:
        pred_boxes (list): list of lists containing all bboxes with each bboxes
        specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
        true_boxes (list): Similar as pred_boxes except all the correct ones
        iou_threshold (float): threshold where predicted bboxes is correct
        box_format (str): "midpoint" or "corners" used to specify bboxes
        num_classes (int): number of classes

    Returns:
        float: mAP value across all classes given a specific IoU threshold
    """

    # list storing all AP for respective classes
    average_precisions = []

    # used for numerical stability later on
    epsilon = 1e-6

    for c in range(num_classes):
        detections = []
        ground_truths = []

        # Go through all predictions and targets,
        # and only add the ones that belong to the
        # current class c
        for detection in pred_boxes:
            if detection[1] == c:
                detections.append(detection)

        for true_box in true_boxes:
            if true_box[1] == c:
                ground_truths.append(true_box)

        # find the amount of bboxes for each training example
        # Counter here finds how many ground truth bboxes we get
        # for each training example, so let's say img 0 has 3,
        # img 1 has 5 then we will obtain a dictionary with:
        # amount_bboxes = {0:3, 1:5}
        amount_bboxes = Counter([gt[0] for gt in ground_truths])

        # We then go through each key, val in this dictionary
        # and convert to the following (w.r.t same example):
        # amount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
        for key, val in amount_bboxes.items():
            amount_bboxes[key] = torch.zeros(val)

        # sort by box probabilities which is index 2
        detections.sort(key=lambda x: x[2], reverse=True)
        TP = torch.zeros((len(detections)))
        FP = torch.zeros((len(detections)))
        total_true_bboxes = len(ground_truths)

        # If none exists for this class then we can safely skip
        if total_true_bboxes == 0:
            continue

        for detection_idx, detection in enumerate(detections):
            # Only take out the ground_truths that have the same
            # training idx as detection
            ground_truth_img = [
                bbox for bbox in ground_truths if bbox[0] == detection[0]
            ]

            num_gts = len(ground_truth_img)
            best_iou = 0

            for idx, gt in enumerate(ground_truth_img):
                iou = intersection_over_union(
                    torch.tensor(detection[3:]),
                    torch.tensor(gt[3:]),
                    box_format=box_format,
                )

                if iou > best_iou:
                    best_iou = iou
                    best_gt_idx = idx

            if best_iou > iou_threshold:
                # only detect ground truth detection once
                if amount_bboxes[detection[0]][best_gt_idx] == 0:
                    # true positive and add this bounding box to seen
                    TP[detection_idx] = 1
                    amount_bboxes[detection[0]][best_gt_idx] = 1
                else:
                    FP[detection_idx] = 1

            # if IOU is lower then the detection is a false positive
            else:
                FP[detection_idx] = 1

        TP_cumsum = torch.cumsum(TP, dim=0)
        FP_cumsum = torch.cumsum(FP, dim=0)
        recalls = TP_cumsum / (total_true_bboxes + epsilon)
        precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
        precisions = torch.cat((torch.tensor([1]), precisions))
        recalls = torch.cat((torch.tensor([0]), recalls))
        # torch.trapz for numerical integration
        average_precisions.append(torch.trapz(precisions, recalls))

    return sum(average_precisions) / len(average_precisions)


def plot_image(image, boxes):
    """Plots predicted bounding boxes on the image"""
    cmap = plt.get_cmap("tab20b")
    class_labels = config.COCO_LABELS if config.DATASET == 'COCO' else config.PASCAL_CLASSES
    colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
    im = np.array(image)
    height, width, _ = im.shape

    # Create figure and axes
    fig, ax = plt.subplots(1)
    # Display the image
    ax.imshow(im)

    # box[0] is x midpoint, box[2] is width
    # box[1] is y midpoint, box[3] is height

    # Create a Rectangle patch
    for box in boxes:
        assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
        class_pred = box[0]
        box = box[2:]
        upper_left_x = box[0] - box[2] / 2
        upper_left_y = box[1] - box[3] / 2
        rect = patches.Rectangle(
            (upper_left_x * width, upper_left_y * height),
            box[2] * width,
            box[3] * height,
            linewidth=2,
            edgecolor=colors[int(class_pred)],
            facecolor="none",
        )
        # Add the patch to the Axes
        ax.add_patch(rect)
        plt.text(
            upper_left_x * width,
            upper_left_y * height,
            s=class_labels[int(class_pred)],
            color="white",
            verticalalignment="top",
            bbox={"color": colors[int(class_pred)], "pad": 0},
        )

    plt.show()


def get_evaluation_bboxes(
    loader,
    model,
    iou_threshold,
    anchors,
    threshold,
    box_format="midpoint",
    device="cuda",
):
    # make sure model is in eval before get bboxes
    model.eval()
    train_idx = 0
    all_pred_boxes = []
    all_true_boxes = []
    for batch_idx, (x, labels) in enumerate(tqdm(loader)):
        x = x.to(device)

        with torch.no_grad():
            predictions = model(x)

        batch_size = x.shape[0]
        bboxes = [[] for _ in range(batch_size)]
        for i in range(3):
            S = predictions[i].shape[2]
            anchor = torch.tensor([*anchors[i]]).to(device) * S
            boxes_scale_i = cells_to_bboxes(
                predictions[i], anchor, S=S, is_preds=True
            )
            for idx, (box) in enumerate(boxes_scale_i):
                bboxes[idx] += box

        # we just want one bbox for each label, not one for each scale
        true_bboxes = cells_to_bboxes(
            labels[2], anchor, S=S, is_preds=False
        )

        for idx in range(batch_size):
            nms_boxes = non_max_suppression(
                bboxes[idx],
                iou_threshold=iou_threshold,
                threshold=threshold,
                box_format=box_format,
            )

            for nms_box in nms_boxes:
                all_pred_boxes.append([train_idx] + nms_box)

            for box in true_bboxes[idx]:
                if box[1] > threshold:
                    all_true_boxes.append([train_idx] + box)

            train_idx += 1

    model.train()
    return all_pred_boxes, all_true_boxes


def cells_to_bboxes(predictions, anchors, S, is_preds=True):
    """
    Scales the predictions coming from the model to
    be relative to the entire image such that they for example later
    can be plotted or evaluated.
    INPUT:
    predictions: tensor of size (N, 3, S, S, num_classes+5)
    anchors: the anchors used for the predictions
    S: the number of cells the image is divided in on the width (and height)
    is_preds: whether the input is predictions or the true bounding boxes
    OUTPUT:
    converted_bboxes: the converted boxes of sizes (N, num_anchors, S, S, 1+5) with class index,
                      object score, bounding box coordinates
    """
    BATCH_SIZE = predictions.shape[0]
    num_anchors = len(anchors)
    box_predictions = predictions[..., 1:5]
    if is_preds:
        anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
        box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
        box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
        scores = torch.sigmoid(predictions[..., 0:1])
        best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
    else:
        scores = predictions[..., 0:1]
        best_class = predictions[..., 5:6]

    cell_indices = (
        torch.arange(S)
        .repeat(predictions.shape[0], 3, S, 1)
        .unsqueeze(-1)
        .to(predictions.device)
    )
    x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
    y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
    w_h = 1 / S * box_predictions[..., 2:4]
    converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1).reshape(BATCH_SIZE, num_anchors * S * S, 6)
    return converted_bboxes.tolist()


def check_class_accuracy(model, loader, threshold):
    model.eval()
    tot_class_preds, correct_class = 0, 0
    tot_noobj, correct_noobj = 0, 0
    tot_obj, correct_obj = 0, 0

    for idx, (x, y) in enumerate(tqdm(loader)):
        x = x.to(config.DEVICE)
        with torch.no_grad():
            out = model(x)

        for i in range(3):
            y[i] = y[i].to(config.DEVICE)
            obj = y[i][..., 0] == 1  # in paper this is Iobj_i
            noobj = y[i][..., 0] == 0  # in paper this is Inoobj_i

            correct_class += torch.sum(
                torch.argmax(out[i][..., 5:][obj], dim=-1) == y[i][..., 5][obj]
            )
            tot_class_preds += torch.sum(obj)

            obj_preds = torch.sigmoid(out[i][..., 0]) > threshold
            correct_obj += torch.sum(obj_preds[obj] == y[i][..., 0][obj])
            tot_obj += torch.sum(obj)
            correct_noobj += torch.sum(obj_preds[noobj] == y[i][..., 0][noobj])
            tot_noobj += torch.sum(noobj)

    # print(f"Class accuracy is: {(correct_class/(tot_class_preds+1e-16))*100:2f}%")
    # print(f"No obj accuracy is: {(correct_noobj/(tot_noobj+1e-16))*100:2f}%")
    # print(f"Obj accuracy is: {(correct_obj/(tot_obj+1e-16))*100:2f}%")
    model.train()
    class_acc = (correct_class / (tot_class_preds + 1e-16)) * 100
    no_obj_acc = (correct_noobj / (tot_noobj + 1e-16)) * 100
    obj_acc = (correct_obj / (tot_obj + 1e-16)) * 100
    return class_acc, no_obj_acc, obj_acc


def get_mean_std(loader):
    # var[X] = E[X**2] - E[X]**2
    channels_sum, channels_sqrd_sum, num_batches = 0, 0, 0

    for data, _ in tqdm(loader):
        channels_sum += torch.mean(data, dim=[0, 2, 3])
        channels_sqrd_sum += torch.mean(data ** 2, dim=[0, 2, 3])
        num_batches += 1

    mean = channels_sum / num_batches
    std = (channels_sqrd_sum / num_batches - mean ** 2) ** 0.5

    return mean, std


def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"):
    print("=> Saving checkpoint")
    checkpoint = {
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)


def load_checkpoint(checkpoint_file, model, optimizer, lr):
    print("=> Loading checkpoint")
    checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])

    # If we don't do this then it will just have learning rate of old checkpoint
    # and it will lead to many hours of debugging \:
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr


def get_loaders(train_csv_path, test_csv_path):
    from dataset import YOLOTrainDataset, YOLOTestDataset

    IMAGE_SIZE = config.IMAGE_SIZE
    train_dataset = YOLOTrainDataset(
        train_csv_path,
        transform=config.train_transforms,
        S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
        img_dir=config.IMG_DIR,
        label_dir=config.LABEL_DIR,
        anchors=config.ANCHORS,
    )
    test_dataset = YOLOTestDataset(
        test_csv_path,
        transform=config.test_transforms,
        S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
        img_dir=config.IMG_DIR,
        label_dir=config.LABEL_DIR,
        anchors=config.ANCHORS,
    )
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        pin_memory=config.PIN_MEMORY,
        shuffle=True,
        drop_last=False,
    )
    test_loader = DataLoader(
        dataset=test_dataset,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        pin_memory=config.PIN_MEMORY,
        shuffle=False,
        drop_last=False,
    )

    train_eval_dataset = YOLOTestDataset(
        train_csv_path,
        transform=config.test_transforms,
        S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
        img_dir=config.IMG_DIR,
        label_dir=config.LABEL_DIR,
        anchors=config.ANCHORS,
    )
    train_eval_loader = DataLoader(
        dataset=train_eval_dataset,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        pin_memory=config.PIN_MEMORY,
        shuffle=False,
        drop_last=False,
    )

    return train_loader, test_loader, train_eval_loader


def plot_couple_examples(model, loader, thresh, iou_thresh, anchors):
    model.eval()
    x, y = next(iter(loader))
    x = x.to(config.DEVICE)
    with torch.no_grad():
        out = model(x)
        bboxes = [[] for _ in range(x.shape[0])]
        for i in range(3):
            batch_size, A, S, _, _ = out[i].shape
            anchor = anchors[i]
            boxes_scale_i = cells_to_bboxes(
                out[i], anchor, S=S, is_preds=True
            )
            for idx, (box) in enumerate(boxes_scale_i):
                bboxes[idx] += box

        model.train()

    for i in range(batch_size // 4):
        nms_boxes = non_max_suppression(
            bboxes[i], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
        )
        plot_image(x[i].permute(1, 2, 0).detach().cpu(), nms_boxes)


# def seed_everything(seed=42):
#     os.environ['PYTHONHASHSEED'] = str(seed)
#     random.seed(seed)
#     np.random.seed(seed)
#     torch.manual_seed(seed)
#     torch.cuda.manual_seed(seed)
#     torch.cuda.manual_seed_all(seed)
#     torch.backends.cudnn.deterministic = True
#     torch.backends.cudnn.benchmark = False


def clip_coords(boxes, img_shape):
    # Clip bounding xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom right y
    return y


def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segments into pixel segments, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = w * x[..., 0] + padw  # top left x
    y[..., 1] = h * x[..., 1] + padh  # top left y
    return y


def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
    if clip:
        clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center
    y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h  # y center
    y[..., 2] = (x[..., 2] - x[..., 0]) / w  # width
    y[..., 3] = (x[..., 3] - x[..., 1]) / h  # height
    return y


def clip_boxes(boxes, shape):
    # Clip boxes (xyxy) to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[..., 0].clamp_(0, shape[1])  # x1
        boxes[..., 1].clamp_(0, shape[0])  # y1
        boxes[..., 2].clamp_(0, shape[1])  # x2
        boxes[..., 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2


class LearningRateFinder:
    def __init__(self, trainer, model):
        self.trainer = trainer
        self.model = model

    def find_and_set_learning_rate(self):
        tuner = Tuner(self.trainer)
        lr_finder = tuner.lr_find(self.model)

        # Plot the learning rate curve and get a suggestion
        fig = lr_finder.plot(suggest=True)
        fig.show()

        # Get the suggested new learning rate
        new_lr = lr_finder.suggestion()

        # Update the learning rate in the model's hyperparameters
        self.model.hparams.learning_rate = new_lr
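Sanity-check sketch (hypothetical, not part of this upload): a tiny hand-made example for the box utilities above; the box coordinates and scores are illustrative only.

import torch
from utils import non_max_suppression, mean_average_precision

# Two overlapping class-0 predictions in midpoint format: [class, score, x, y, w, h].
preds = [
    [0, 0.9, 0.50, 0.5, 0.4, 0.4],
    [0, 0.6, 0.52, 0.5, 0.4, 0.4],   # near-duplicate, should be suppressed
]
kept = non_max_suppression(preds, iou_threshold=0.5, threshold=0.3, box_format="midpoint")
print(len(kept))  # 1

pred_boxes = [[0] + box for box in kept]          # prepend image index -> [img_idx, class, score, x, y, w, h]
true_boxes = [[0, 0, 1.0, 0.5, 0.5, 0.4, 0.4]]    # one matching ground truth
print(mean_average_precision(pred_boxes, true_boxes, iou_threshold=0.5,
                             box_format="midpoint", num_classes=20))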
utils_for_app.py ADDED
@@ -0,0 +1,255 @@
from typing import List
import torch
import numpy as np
import cv2
import random
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pytorch_grad_cam.base_cam import BaseCAM
from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
import config


def cells_to_bboxes(predictions, anchors, S, is_preds=True):
    """
    Scales the predictions coming from the model to
    be relative to the entire image such that they for example later
    can be plotted or evaluated.
    INPUT:
    predictions: tensor of size (N, 3, S, S, num_classes+5)
    anchors: the anchors used for the predictions
    S: the number of cells the image is divided in on the width (and height)
    is_preds: whether the input is predictions or the true bounding boxes
    OUTPUT:
    converted_bboxes: the converted boxes of sizes (N, num_anchors, S, S, 1+5) with class index,
                      object score, bounding box coordinates
    """
    BATCH_SIZE = predictions.shape[0]
    num_anchors = len(anchors)
    box_predictions = predictions[..., 1:5]
    if is_preds:
        anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
        box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
        box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
        scores = torch.sigmoid(predictions[..., 0:1])
        best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
    else:
        scores = predictions[..., 0:1]
        best_class = predictions[..., 5:6]

    cell_indices = (
        torch.arange(S)
        .repeat(predictions.shape[0], 3, S, 1)
        .unsqueeze(-1)
        .to(predictions.device)
    )
    x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
    y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
    w_h = 1 / S * box_predictions[..., 2:4]
    converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1).reshape(BATCH_SIZE, num_anchors * S * S, 6)
    return converted_bboxes.tolist()


def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
    """
    Video explanation of this function:
    https://youtu.be/XXYG5ZWtjj0
    This function calculates intersection over union (iou) given pred boxes
    and target boxes.
    Parameters:
        boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
        boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
        box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
    Returns:
        tensor: Intersection over union for all examples
    """

    if box_format == "midpoint":
        box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
        box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
        box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
        box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
        box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
        box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
        box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
        box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2

    if box_format == "corners":
        box1_x1 = boxes_preds[..., 0:1]
        box1_y1 = boxes_preds[..., 1:2]
        box1_x2 = boxes_preds[..., 2:3]
        box1_y2 = boxes_preds[..., 3:4]
        box2_x1 = boxes_labels[..., 0:1]
        box2_y1 = boxes_labels[..., 1:2]
        box2_x2 = boxes_labels[..., 2:3]
        box2_y2 = boxes_labels[..., 3:4]

    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)

    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))

    return intersection / (box1_area + box2_area - intersection + 1e-6)


def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
    """
    Video explanation of this function:
    https://youtu.be/YDkjWEN8jNA
    Does Non Max Suppression given bboxes
    Parameters:
        bboxes (list): list of lists containing all bboxes with each bboxes
        specified as [class_pred, prob_score, x1, y1, x2, y2]
        iou_threshold (float): threshold where predicted bboxes is correct
        threshold (float): threshold to remove predicted bboxes (independent of IoU)
        box_format (str): "midpoint" or "corners" used to specify bboxes
    Returns:
        list: bboxes after performing NMS given a specific IoU threshold
    """

    assert type(bboxes) == list

    bboxes = [box for box in bboxes if box[1] > threshold]
    bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
    bboxes_after_nms = []

    while bboxes:
        chosen_box = bboxes.pop(0)

        bboxes = [
            box
            for box in bboxes
            if box[0] != chosen_box[0]
            or intersection_over_union(
                torch.tensor(chosen_box[2:]),
                torch.tensor(box[2:]),
                box_format=box_format,
            )
            < iou_threshold
        ]

        bboxes_after_nms.append(chosen_box)

    return bboxes_after_nms


def plot_image(image, boxes):
    """Plots predicted bounding boxes on the image"""
    cmap = plt.get_cmap("tab20b")
    class_labels = config.COCO_LABELS if config.DATASET == 'COCO' else config.PASCAL_CLASSES
    colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
    im = np.array(image)
    height, width, _ = im.shape

    # Create figure and axes
    fig, ax = plt.subplots(1)
    # Display the image
    ax.imshow(im)

    # box[0] is x midpoint, box[2] is width
    # box[1] is y midpoint, box[3] is height

    # Create a Rectangle patch
    for box in boxes:
        assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
        class_pred = box[0]
        box = box[2:]
        upper_left_x = box[0] - box[2] / 2
        upper_left_y = box[1] - box[3] / 2
        rect = patches.Rectangle(
            (upper_left_x * width, upper_left_y * height),
            box[2] * width,
            box[3] * height,
            linewidth=2,
            edgecolor=colors[int(class_pred)],
            facecolor="none",
        )
        # Add the patch to the Axes
        ax.add_patch(rect)
        plt.text(
            upper_left_x * width,
            upper_left_y * height,
            s=class_labels[int(class_pred)],
            color="white",
            verticalalignment="top",
            bbox={"color": colors[int(class_pred)], "pad": 0},
        )

    plt.show()
    return fig


class YoloCAM(BaseCAM):
    def __init__(self, model, target_layers, use_cuda=False,
                 reshape_transform=None):
        super(YoloCAM, self).__init__(model,
                                      target_layers,
                                      use_cuda,
                                      reshape_transform,
                                      uses_gradients=False)

    def forward(self,
                input_tensor: torch.Tensor,
                scaled_anchors: torch.Tensor,
                targets: List[torch.nn.Module],
                eigen_smooth: bool = False) -> np.ndarray:

        if self.cuda:
            input_tensor = input_tensor.cuda()

        if self.compute_input_gradient:
            input_tensor = torch.autograd.Variable(input_tensor,
                                                   requires_grad=True)

        outputs = self.activations_and_grads(input_tensor)
        if targets is None:
            bboxes = [[] for _ in range(1)]
            for i in range(3):
                batch_size, A, S, _, _ = outputs[i].shape
                anchor = scaled_anchors[i]
                boxes_scale_i = cells_to_bboxes(
                    outputs[i], anchor, S=S, is_preds=True
                )
                for idx, (box) in enumerate(boxes_scale_i):
                    bboxes[idx] += box

            nms_boxes = non_max_suppression(
                bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
            )
            # target_categories = np.argmax(outputs.cpu().data.numpy(), axis=-1)
            target_categories = [box[0] for box in nms_boxes]
            targets = [ClassifierOutputTarget(
                category) for category in target_categories]

        if self.uses_gradients:
            self.model.zero_grad()
            loss = sum([target(output)
                        for target, output in zip(targets, outputs)])
            loss.backward(retain_graph=True)

        # In most of the saliency attribution papers, the saliency is
        # computed with a single target layer.
        # Commonly it is the last convolutional layer.
        # Here we support passing a list with multiple target layers.
        # It will compute the saliency image for every image,
        # and then aggregate them (with a default mean aggregation).
        # This gives you more flexibility in case you just want to
        # use all conv layers for example, all Batchnorm layers,
        # or something else.
        cam_per_layer = self.compute_cam_per_layer(input_tensor,
                                                   targets,
                                                   eigen_smooth)
        return self.aggregate_multi_layers(cam_per_layer)

    def get_cam_image(self,
                      input_tensor,
                      target_layer,
                      target_category,
                      activations,
                      grads,
                      eigen_smooth):
        return get_2d_projection(activations)