jaiyeshchahar committed on
Commit
5d3cdc9
·
1 Parent(s): 99957fc

Upload 11 files

Files changed (11)
  1. app.py +87 -0
  2. callbacks.py +84 -0
  3. config.py +115 -0
  4. custom_yolo_model.pth +3 -0
  5. dataset.py +225 -0
  6. loss.py +79 -0
  7. model.py +177 -0
  8. requirements.txt +12 -0
  9. utils.py +707 -0
  10. utils_app.py +246 -0
  11. yolo_lightning.py +106 -0
app.py ADDED
@@ -0,0 +1,87 @@
+ import torch
+ import torch.optim as optim
+ import lightning.pytorch as pl
+ from lightning.pytorch.tuner import Tuner
+ from tqdm import tqdm
+ from torch.optim.lr_scheduler import OneCycleLR
+ import matplotlib.pyplot as plt
+ import matplotlib.patches as patches
+ import albumentations as A
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ from albumentations.pytorch import ToTensorV2
+ import config
+ from yolo_lightning import YOLOv3Lightning
+ import cv2
+ import numpy as np
+ import gradio as gr
+ import os
+ from utils_app import *
+
+ model = YOLOv3Lightning(config)
+
+ model.load_state_dict(torch.load("custom_yolo_model.pth", map_location=torch.device('cpu')), strict=False)
+ model.setup(stage="test")
+
+ IMAGE_SIZE = 416
+
+ ANCHORS = [
+     [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
+     [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
+     [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
+ ]  # Note these have been rescaled to be between [0, 1]
+ S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
+
+ scaled_anchors = (
+     torch.tensor(config.ANCHORS)
+     * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+ )
+
+ def process_image_and_plot(image, model, scaled_anchors):
+     # Run the model on a single image and collect boxes from all three prediction scales
+     transformed_image = config.transforms(image=image)["image"].unsqueeze(0)
+     output = model(transformed_image)
+     bboxes = [[] for _ in range(1)]
+
+     for i in range(3):
+         batch_size, A, S, _, _ = output[i].shape
+         anchor = scaled_anchors[i]
+         boxes_scale_i = cells_to_bboxes(output[i], anchor, S=S, is_preds=True)
+         for idx, box in enumerate(boxes_scale_i):
+             bboxes[idx] += box
+
+     nms_boxes = non_max_suppression(
+         bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
+     )
+     fig = plot_image(transformed_image[0].permute(1, 2, 0), nms_boxes)
+
+     cam = YoloCAM(model=model, target_layers=[model.model.layers[-2]], use_cuda=False)
+     grayscale_cam = cam(transformed_image, scaled_anchors)[0, :, :]
+     img = cv2.resize(image, (416, 416))
+     img = np.float32(img) / 255
+     cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True)
+
+     return fig, cam_image
+
+
+ examples = [
+     ["images/2012_004288.jpg"],
+     ["images/2012_004314.jpg"],
+     ["images/car.jpg"],
+ ]
+
+ def processed_image(image):
+     figure, gradcam = process_image_and_plot(image, model, scaled_anchors)
+     return figure, gradcam
+
+ title = "YoloV3 on Pascal VOC Dataset (GradCAM)"
+ description = f"PyTorch implementation of YOLOv3 trained from scratch on the Pascal VOC dataset, with GradCAM \n Classes in Pascal VOC: {', '.join(config.PASCAL_CLASSES)}"
+ demo = gr.Interface(
+     processed_image,
+     inputs=[
+         gr.Image(label="Input Image"),
+     ],
+     outputs=[gr.Plot(), gr.Image(shape=(32, 32), label="GradCAM Prediction")],
+     title=title,
+     description=description,
+     examples=examples,
+ )
+ demo.launch()
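For reference, the same pipeline can be exercised without starting the Gradio server. The sketch below is a minimal, untested smoke test: it assumes images/car.jpg (one of the bundled examples) exists, and that plot_image from utils_app returns a matplotlib figure, as the gr.Plot output above suggests.

# Sketch: running the app's prediction path on one local image (no UI).
import cv2

bgr = cv2.imread("images/car.jpg")                  # OpenCV reads BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)          # Gradio hands the app RGB numpy arrays
fig, cam_image = processed_image(rgb)               # same function the Interface calls
fig.savefig("prediction.png")                       # boxes drawn by plot_image
cv2.imwrite("gradcam.png", cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR))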
callbacks.py ADDED
@@ -0,0 +1,84 @@
+ import lightning.pytorch as pl
+ import config
+ from utils import (check_class_accuracy, get_evaluation_bboxes, mean_average_precision, plot_couple_examples)
+ from lightning.pytorch.callbacks import Callback
+
+
+ class plot_examples_callback(Callback):
+     def __init__(self, epoch_interval: int = 5) -> None:
+         super().__init__()
+         self.epoch_interval = epoch_interval
+
+     def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
+         if (trainer.current_epoch + 1) % self.epoch_interval == 0:
+             plot_couple_examples(
+                 model=pl_module,
+                 loader=pl_module.train_dataloader(),
+                 thresh=0.6,
+                 iou_thresh=0.5,
+                 anchors=pl_module.scaled_anchors,
+             )
+
+
+ class class_accuracy_callback(pl.Callback):
+     def __init__(self, train_epoch_interval: int = 1, test_epoch_interval: int = 10) -> None:
+         super().__init__()
+         self.train_epoch_interval = train_epoch_interval
+         self.test_epoch_interval = test_epoch_interval
+
+     def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
+         if (trainer.current_epoch + 1) % self.train_epoch_interval == 0:
+             class_acc, no_obj_acc, obj_acc = check_class_accuracy(model=pl_module, loader=pl_module.train_dataloader(), threshold=config.CONF_THRESHOLD)
+             class_acc = round(class_acc.item(), 2)
+             no_obj_acc = round(no_obj_acc.item(), 2)
+             obj_acc = round(obj_acc.item(), 2)
+
+             pl_module.log_dict(
+                 {
+                     "train_class_acc": class_acc,
+                     "train_no_obj_acc": no_obj_acc,
+                     "train_obj_acc": obj_acc,
+                 },
+                 logger=True,
+             )
+             print("Logged on A100 GPU's Colab - Adil Jaleel")
+             print(f"Epoch Number: {trainer.current_epoch + 1}")
+             print("Train Metrics")
+             print(f"Loss: {trainer.callback_metrics['train_loss_epoch']}")
+             print(f"Class Accuracy: {class_acc:.2f}%")
+             print(f"No Object Accuracy: {no_obj_acc:.2f}%")
+             print(f"Object Accuracy: {obj_acc:.2f}%")
+
+         if (trainer.current_epoch + 1) % self.test_epoch_interval == 0:
+             class_acc, no_obj_acc, obj_acc = check_class_accuracy(model=pl_module, loader=pl_module.test_dataloader(), threshold=config.CONF_THRESHOLD)
+             class_acc = round(class_acc.item(), 2)
+             no_obj_acc = round(no_obj_acc.item(), 2)
+             obj_acc = round(obj_acc.item(), 2)
+
+             pl_module.log_dict(
+                 {
+                     "test_class_acc": class_acc,
+                     "test_no_obj_acc": no_obj_acc,
+                     "test_obj_acc": obj_acc,
+                 },
+                 logger=True,
+             )
+
+             print("Test Metrics")
+             print(f"Class Accuracy: {class_acc:.2f}%")
+             print(f"No Object Accuracy: {no_obj_acc:.2f}%")
+             print(f"Object Accuracy: {obj_acc:.2f}%")
+
+
+ class map_callback(pl.Callback):
+     def __init__(self, epoch_interval: int = 10) -> None:
+         super().__init__()
+         self.epoch_interval = epoch_interval
+
+     def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
+         if (trainer.current_epoch + 1) % self.epoch_interval == 0:
+             pred_boxes, true_boxes = get_evaluation_bboxes(loader=pl_module.test_dataloader(), model=pl_module, iou_threshold=config.NMS_IOU_THRESH, anchors=config.ANCHORS, threshold=config.CONF_THRESHOLD, device=config.DEVICE)
+             map_val = mean_average_precision(pred_boxes=pred_boxes, true_boxes=true_boxes, iou_threshold=config.MAP_IOU_THRESH, box_format="midpoint", num_classes=config.NUM_CLASSES)
+             print("MAP: ", map_val.item())
+             pl_module.log("MAP", map_val.item(), logger=True)
+             pl_module.train()
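These callbacks are not wired to a trainer anywhere in the app code of this upload. A plausible, untested training-time usage is sketched below; the YOLOv3Lightning module and trainer options are assumptions about how the rest of the project is driven.

# Sketch: attaching the callbacks above to a Lightning Trainer (assumed training setup).
import lightning.pytorch as pl
import config
from yolo_lightning import YOLOv3Lightning
from callbacks import plot_examples_callback, class_accuracy_callback, map_callback

model = YOLOv3Lightning(config)
trainer = pl.Trainer(
    max_epochs=config.NUM_EPOCHS,
    accelerator="auto",
    callbacks=[
        plot_examples_callback(epoch_interval=5),       # plot a few detections periodically
        class_accuracy_callback(train_epoch_interval=1, test_epoch_interval=10),
        map_callback(epoch_interval=10),                # periodic mAP on the test loader
    ],
)
trainer.fit(model)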
config.py ADDED
@@ -0,0 +1,115 @@
+ import albumentations as A
+ import cv2
+ import torch
+ import os
+ from albumentations.pytorch import ToTensorV2
+ # from utils import seed_everything
+
+ DATASET = 'PASCAL_VOC'
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ # seed_everything()  # If you want deterministic behavior
+ NUM_WORKERS = os.cpu_count() - 1
+ BATCH_SIZE = 16
+ IMAGE_SIZE = 416
+ NUM_CLASSES = 20
+ LEARNING_RATE = 1e-5
+ WEIGHT_DECAY = 1e-4
+ NUM_EPOCHS = 100
+ CONF_THRESHOLD = 0.05
+ MAP_IOU_THRESH = 0.5
+ NMS_IOU_THRESH = 0.45
+ S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
+ PIN_MEMORY = True
+ LOAD_MODEL = False
+ SAVE_MODEL = True
+ CHECKPOINT_FILE = "checkpoint.pth.tar"
+ IMG_DIR = DATASET + "/images/"
+ LABEL_DIR = DATASET + "/labels/"
+ CHECKPOINT_PATH = "checkpoints/"
+
+ ANCHORS = [
+     [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
+     [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
+     [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
+ ]  # Note these have been rescaled to be between [0, 1]
+
+ means = [0.485, 0.456, 0.406]
+
+ scale = 1.1
+ train_transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=int(IMAGE_SIZE * scale)),
+         A.PadIfNeeded(
+             min_height=int(IMAGE_SIZE * scale),
+             min_width=int(IMAGE_SIZE * scale),
+             border_mode=cv2.BORDER_CONSTANT,
+         ),
+         A.Rotate(limit=10, interpolation=1, border_mode=4),
+         A.RandomCrop(width=IMAGE_SIZE, height=IMAGE_SIZE),
+         A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),
+         A.OneOf(
+             [
+                 A.ShiftScaleRotate(
+                     rotate_limit=20, p=0.5, border_mode=cv2.BORDER_CONSTANT
+                 ),
+                 # A.Affine(shear=15, p=0.5, mode="constant"),
+             ],
+             p=1.0,
+         ),
+         A.HorizontalFlip(p=0.5),
+         A.Blur(p=0.1),
+         A.CLAHE(p=0.1),
+         A.Posterize(p=0.1),
+         A.ToGray(p=0.1),
+         A.ChannelShuffle(p=0.05),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ],
+     bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
+ )
+ test_transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=IMAGE_SIZE),
+         A.PadIfNeeded(
+             min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+         ),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ],
+     bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[]),
+ )
+
+ transforms = A.Compose(
+     [
+         A.LongestMaxSize(max_size=IMAGE_SIZE),
+         A.PadIfNeeded(
+             min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
+         ),
+         A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
+         ToTensorV2(),
+     ],
+ )
+
+ PASCAL_CLASSES = [
+     "aeroplane",
+     "bicycle",
+     "bird",
+     "boat",
+     "bottle",
+     "bus",
+     "car",
+     "cat",
+     "chair",
+     "cow",
+     "diningtable",
+     "dog",
+     "horse",
+     "motorbike",
+     "person",
+     "pottedplant",
+     "sheep",
+     "sofa",
+     "train",
+     "tvmonitor",
+ ]
+
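As a quick sanity check of the inference-time transform that app.py uses, the sketch below feeds a dummy image through config.transforms and prints the resulting tensor shape; the dummy image size is arbitrary and the snippet is illustrative only.

# Sketch: verifying the inference transform output shape (no dataset needed).
import numpy as np
import config

dummy = np.random.randint(0, 255, (375, 500, 3), dtype=np.uint8)   # arbitrary HxWx3 image
out = config.transforms(image=dummy)["image"]
print(out.shape, out.dtype)   # expected: torch.Size([3, 416, 416]) torch.float32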
custom_yolo_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f24530b09b71c8d8dad829159a9e3cdf0bdee74e505257777f495e2b4fdf3bdf
+ size 246879805
dataset.py ADDED
@@ -0,0 +1,225 @@
1
+ """
2
+ Creates a Pytorch dataset to load the Pascal VOC & MS COCO datasets
3
+ """
4
+
5
+ import config
6
+ import numpy as np
7
+ import os
8
+ import pandas as pd
9
+ import torch
10
+ from utils import xywhn2xyxy, xyxy2xywhn
11
+ import random
12
+
13
+ from PIL import Image, ImageFile
14
+ from torch.utils.data import Dataset, DataLoader
15
+ from utils import (
16
+ cells_to_bboxes,
17
+ iou_width_height as iou,
18
+ non_max_suppression as nms,
19
+ plot_image
20
+ )
21
+
22
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
23
+
24
+ class YOLOTrainDataset(Dataset):
25
+ def __init__(
26
+ self,
27
+ csv_file,
28
+ img_dir,
29
+ label_dir,
30
+ anchors,
31
+ image_size=config.IMAGE_SIZE,
32
+ S=[13, 26, 52],
33
+ C=20,
34
+ transform=None,
35
+ ):
36
+ self.annotations = pd.read_csv(csv_file)
37
+ self.img_dir = img_dir
38
+ self.label_dir = label_dir
39
+ self.image_size = image_size
40
+ self.mosaic_border = [image_size // 2, image_size // 2]
41
+ self.transform = transform
42
+ self.S = S
43
+ self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2]) # for all 3 scales
44
+ self.num_anchors = self.anchors.shape[0]
45
+ self.num_anchors_per_scale = self.num_anchors // 3
46
+ self.C = C
47
+ self.ignore_iou_thresh = 0.5
48
+ self.counter = 0
49
+
50
+ def __len__(self):
51
+ return len(self.annotations)
52
+
53
+ def load_mosaic(self, index):
54
+ # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
55
+ labels4 = []
56
+ s = self.image_size
57
+ yc, xc = (int(random.uniform(x, 2 * s - x)) for x in self.mosaic_border) # mosaic center x, y
58
+ indices = [index] + random.choices(range(len(self)), k=3) # 3 additional image indices
59
+ random.shuffle(indices)
60
+ for i, index in enumerate(indices):
61
+ # Load image
62
+ label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
63
+ bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
64
+ img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
65
+ img = np.array(Image.open(img_path).convert("RGB"))
66
+
67
+
68
+ h, w = img.shape[0], img.shape[1]
69
+ labels = np.array(bboxes)
70
+
71
+ # place img in img4
72
+ if i == 0: # top left
73
+ img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
74
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
75
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
76
+ elif i == 1: # top right
77
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
78
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
79
+ elif i == 2: # bottom left
80
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
81
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
82
+ elif i == 3: # bottom right
83
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
84
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
85
+
86
+ img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
87
+ padw = x1a - x1b
88
+ padh = y1a - y1b
89
+
90
+ # Labels
91
+ if labels.size:
92
+ labels[:, :-1] = xywhn2xyxy(labels[:, :-1], w, h, padw, padh) # normalized xywh to pixel xyxy format
93
+ labels4.append(labels)
94
+
95
+ # Concat/clip labels
96
+ labels4 = np.concatenate(labels4, 0)
97
+ for x in (labels4[:, :-1],):
98
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
99
+ # img4, labels4 = replicate(img4, labels4) # replicate
100
+ labels4[:, :-1] = xyxy2xywhn(labels4[:, :-1], 2 * s, 2 * s)
101
+ labels4[:, :-1] = np.clip(labels4[:, :-1], 0, 1)
102
+ labels4 = labels4[labels4[:, 2] > 0]
103
+ labels4 = labels4[labels4[:, 3] > 0]
104
+ return img4, labels4
105
+
106
+ def __getitem__(self, index):
107
+
108
+ # Apply mosaic to 3 of every 4 samples (deterministic counter, roughly 75% of samples)
109
+ self.counter = (self.counter + 1) % 4
110
+ if self.counter != 0:
111
+ image, bboxes = self.load_mosaic(index)
112
+ # Else, load normally without mosaic
113
+ else:
114
+ # Load image and bbox
115
+ label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
116
+ bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
117
+ img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
118
+ image = np.array(Image.open(img_path).convert("RGB"))
119
+
120
+ if self.transform:
121
+ augmentations = self.transform(image=image, bboxes=bboxes)
122
+ image = augmentations["image"]
123
+ bboxes = augmentations["bboxes"]
124
+
125
+ # Below assumes 3 scale predictions (as paper) and same num of anchors per scale
126
+ targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S]
127
+ for box in bboxes:
128
+ iou_anchors = iou(torch.tensor(box[2:4]), self.anchors)
129
+ anchor_indices = iou_anchors.argsort(descending=True, dim=0)
130
+ x, y, width, height, class_label = box
131
+ has_anchor = [False] * 3 # each scale should have one anchor
132
+ for anchor_idx in anchor_indices:
133
+ scale_idx = anchor_idx // self.num_anchors_per_scale
134
+ anchor_on_scale = anchor_idx % self.num_anchors_per_scale
135
+ S = self.S[scale_idx]
136
+ i, j = int(S * y), int(S * x) # which cell
137
+ anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
138
+ if not anchor_taken and not has_anchor[scale_idx]:
139
+ targets[scale_idx][anchor_on_scale, i, j, 0] = 1
140
+ x_cell, y_cell = S * x - j, S * y - i # both between [0,1]
141
+ width_cell, height_cell = (
142
+ width * S,
143
+ height * S,
144
+ ) # can be greater than 1 since it's relative to cell
145
+ box_coordinates = torch.tensor(
146
+ [x_cell, y_cell, width_cell, height_cell]
147
+ )
148
+ targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
149
+ targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
150
+ has_anchor[scale_idx] = True
151
+
152
+ elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
153
+ targets[scale_idx][anchor_on_scale, i, j, 0] = -1 # ignore prediction
154
+
155
+ return image, tuple(targets)
156
+
157
+ class YOLOTestDataset(Dataset):
158
+ def __init__(
159
+ self,
160
+ csv_file,
161
+ img_dir,
162
+ label_dir,
163
+ anchors,
164
+ image_size=config.IMAGE_SIZE,
165
+ S=[13, 26, 52],
166
+ C=20,
167
+ transform=None,
168
+ ):
169
+ self.annotations = pd.read_csv(csv_file)
170
+ self.img_dir = img_dir
171
+ self.label_dir = label_dir
172
+ self.image_size = image_size
173
+ self.transform = transform
174
+ self.S = S
175
+ self.anchors = torch.tensor(anchors[0] + anchors[1] + anchors[2]) # for all 3 scales
176
+ self.num_anchors = self.anchors.shape[0]
177
+ self.num_anchors_per_scale = self.num_anchors // 3
178
+ self.C = C
179
+ self.ignore_iou_thresh = 0.5
180
+
181
+ def __len__(self):
182
+ return len(self.annotations)
183
+
184
+ def __getitem__(self, index):
185
+ label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1])
186
+ bboxes = np.roll(np.loadtxt(fname=label_path, delimiter=" ", ndmin=2), 4, axis=1).tolist()
187
+ img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0])
188
+ image = np.array(Image.open(img_path).convert("RGB"))
189
+
190
+ if self.transform:
191
+ augmentations = self.transform(image=image, bboxes=bboxes)
192
+ image = augmentations["image"]
193
+ bboxes = augmentations["bboxes"]
194
+
195
+ # Below assumes 3 scale predictions (as paper) and same num of anchors per scale
196
+ targets = [torch.zeros((self.num_anchors // 3, S, S, 6)) for S in self.S]
197
+ for box in bboxes:
198
+ iou_anchors = iou(torch.tensor(box[2:4]), self.anchors)
199
+ anchor_indices = iou_anchors.argsort(descending=True, dim=0)
200
+ x, y, width, height, class_label = box
201
+ has_anchor = [False] * 3 # each scale should have one anchor
202
+ for anchor_idx in anchor_indices:
203
+ scale_idx = anchor_idx // self.num_anchors_per_scale
204
+ anchor_on_scale = anchor_idx % self.num_anchors_per_scale
205
+ S = self.S[scale_idx]
206
+ i, j = int(S * y), int(S * x) # which cell
207
+ anchor_taken = targets[scale_idx][anchor_on_scale, i, j, 0]
208
+ if not anchor_taken and not has_anchor[scale_idx]:
209
+ targets[scale_idx][anchor_on_scale, i, j, 0] = 1
210
+ x_cell, y_cell = S * x - j, S * y - i # both between [0,1]
211
+ width_cell, height_cell = (
212
+ width * S,
213
+ height * S,
214
+ ) # can be greater than 1 since it's relative to cell
215
+ box_coordinates = torch.tensor(
216
+ [x_cell, y_cell, width_cell, height_cell]
217
+ )
218
+ targets[scale_idx][anchor_on_scale, i, j, 1:5] = box_coordinates
219
+ targets[scale_idx][anchor_on_scale, i, j, 5] = int(class_label)
220
+ has_anchor[scale_idx] = True
221
+
222
+ elif not anchor_taken and iou_anchors[anchor_idx] > self.ignore_iou_thresh:
223
+ targets[scale_idx][anchor_on_scale, i, j, 0] = -1 # ignore prediction
224
+
225
+ return image, tuple(targets)
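A minimal way to exercise either dataset class is sketched below. The CSV filename ("train.csv") and its two-column layout (image filename, label filename) are assumptions, since the annotation CSVs are not part of this upload.

# Sketch: loading one sample and checking target shapes (CSV path is hypothetical).
import config
from dataset import YOLOTrainDataset

dataset = YOLOTrainDataset(
    csv_file=config.DATASET + "/train.csv",   # assumed CSV with image,label filename pairs
    img_dir=config.IMG_DIR,
    label_dir=config.LABEL_DIR,
    anchors=config.ANCHORS,
    transform=config.train_transforms,
)
image, targets = dataset[0]
print(image.shape)                   # torch.Size([3, 416, 416])
print([t.shape for t in targets])    # [(3, 13, 13, 6), (3, 26, 26, 6), (3, 52, 52, 6)]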
loss.py ADDED
@@ -0,0 +1,79 @@
+ """
+ Implementation of Yolo Loss Function similar to the one in Yolov3 paper,
+ the difference from what I can tell is I use CrossEntropy for the classes
+ instead of BinaryCrossEntropy.
+ """
+ import random
+ import torch
+ import torch.nn as nn
+ import lightning.pytorch as pl
+ from utils import intersection_over_union
+
+
+ class YoloLoss(pl.LightningModule):
+     def __init__(self):
+         super().__init__()
+         self.mse = nn.MSELoss()
+         self.bce = nn.BCEWithLogitsLoss()
+         self.entropy = nn.CrossEntropyLoss()
+         self.sigmoid = nn.Sigmoid()
+
+         # Constants signifying how much to pay for each respective part of the loss
+         self.lambda_class = 1
+         self.lambda_noobj = 10
+         self.lambda_obj = 1
+         self.lambda_box = 10
+
+     def forward(self, predictions, target, anchors):
+         # Check where obj and noobj (we ignore if target == -1)
+         obj = target[..., 0] == 1  # in paper this is Iobj_i
+         noobj = target[..., 0] == 0  # in paper this is Inoobj_i
+
+         # ======================= #
+         #   FOR NO OBJECT LOSS    #
+         # ======================= #
+
+         no_object_loss = self.bce(
+             (predictions[..., 0:1][noobj]), (target[..., 0:1][noobj]),
+         )
+
+         # ==================== #
+         #    FOR OBJECT LOSS   #
+         # ==================== #
+
+         anchors = anchors.reshape(1, 3, 1, 1, 2)
+         box_preds = torch.cat([self.sigmoid(predictions[..., 1:3]), torch.exp(predictions[..., 3:5]) * anchors], dim=-1)
+         ious = intersection_over_union(box_preds[obj], target[..., 1:5][obj]).detach()
+         object_loss = self.mse(self.sigmoid(predictions[..., 0:1][obj]), ious * target[..., 0:1][obj])
+
+         # ======================== #
+         #   FOR BOX COORDINATES    #
+         # ======================== #
+
+         predictions[..., 1:3] = self.sigmoid(predictions[..., 1:3])  # x,y coordinates
+         target[..., 3:5] = torch.log(
+             (1e-16 + target[..., 3:5] / anchors)
+         )  # width, height coordinates
+         box_loss = self.mse(predictions[..., 1:5][obj], target[..., 1:5][obj])
+
+         # ================== #
+         #   FOR CLASS LOSS   #
+         # ================== #
+
+         class_loss = self.entropy(
+             (predictions[..., 5:][obj]), (target[..., 5][obj].long()),
+         )
+
+         # print("__________________________________")
+         # print(self.lambda_box * box_loss)
+         # print(self.lambda_obj * object_loss)
+         # print(self.lambda_noobj * no_object_loss)
+         # print(self.lambda_class * class_loss)
+         # print("\n")
+
+         return (
+             self.lambda_box * box_loss
+             + self.lambda_obj * object_loss
+             + self.lambda_noobj * no_object_loss
+             + self.lambda_class * class_loss
+         )
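The loss is computed per prediction scale. A training step would typically sum it over the three heads with the matching scaled anchors, roughly as sketched below; this mirrors the usual YOLOv3 recipe rather than the exact contents of yolo_lightning.py (uploaded in this commit but not shown here), and the batch variables are placeholders.

# Sketch: combining YoloLoss over the three prediction scales (placeholder batch).
import torch
import config
from loss import YoloLoss

loss_fn = YoloLoss()
scaled_anchors = (
    torch.tensor(config.ANCHORS)
    * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
)   # shape (3, 3, 2): anchors expressed in grid-cell units per scale

def yolo_loss(outputs, targets):
    # outputs/targets: one tensor per scale, shapes (N, 3, S, S, 25) and (N, 3, S, S, 6)
    return sum(
        loss_fn(outputs[i], targets[i], scaled_anchors[i])
        for i in range(3)
    )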
model.py ADDED
@@ -0,0 +1,177 @@
1
+ """
2
+ Implementation of YOLOv3 architecture
3
+ """
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import config
8
+
9
+ """
10
+ Information about architecture config:
11
+ Tuple is structured by (filters, kernel_size, stride)
12
+ Every conv is a same convolution.
13
+ List is structured by "B" indicating a residual block followed by the number of repeats
14
+ "S" is for scale prediction block and computing the yolo loss
15
+ "U" is for upsampling the feature map and concatenating with a previous layer
16
+ """
17
+ config = [
18
+ (32, 3, 1),
19
+ (64, 3, 2),
20
+ ["B", 1],
21
+ (128, 3, 2),
22
+ ["B", 2],
23
+ (256, 3, 2),
24
+ ["B", 8],
25
+ (512, 3, 2),
26
+ ["B", 8],
27
+ (1024, 3, 2),
28
+ ["B", 4], # To this point is Darknet-53
29
+ (512, 1, 1),
30
+ (1024, 3, 1),
31
+ "S",
32
+ (256, 1, 1),
33
+ "U",
34
+ (256, 1, 1),
35
+ (512, 3, 1),
36
+ "S",
37
+ (128, 1, 1),
38
+ "U",
39
+ (128, 1, 1),
40
+ (256, 3, 1),
41
+ "S",
42
+ ]
43
+
44
+
45
+ class CNNBlock(nn.Module):
46
+ def __init__(self, in_channels, out_channels, bn_act=True, **kwargs):
47
+ super().__init__()
48
+ self.conv = nn.Conv2d(in_channels, out_channels, bias=not bn_act, **kwargs)
49
+ self.bn = nn.BatchNorm2d(out_channels)
50
+ self.leaky = nn.LeakyReLU(0.1)
51
+ self.use_bn_act = bn_act
52
+
53
+ def forward(self, x):
54
+ if self.use_bn_act:
55
+ return self.leaky(self.bn(self.conv(x)))
56
+ else:
57
+ return self.conv(x)
58
+
59
+
60
+ class ResidualBlock(nn.Module):
61
+ def __init__(self, channels, use_residual=True, num_repeats=1):
62
+ super().__init__()
63
+ self.layers = nn.ModuleList()
64
+ for repeat in range(num_repeats):
65
+ self.layers += [
66
+ nn.Sequential(
67
+ CNNBlock(channels, channels // 2, kernel_size=1),
68
+ CNNBlock(channels // 2, channels, kernel_size=3, padding=1),
69
+ )
70
+ ]
71
+
72
+ self.use_residual = use_residual
73
+ self.num_repeats = num_repeats
74
+
75
+ def forward(self, x):
76
+ for layer in self.layers:
77
+ if self.use_residual:
78
+ x = x + layer(x)
79
+ else:
80
+ x = layer(x)
81
+
82
+ return x
83
+
84
+
85
+ class ScalePrediction(nn.Module):
86
+ def __init__(self, in_channels, num_classes):
87
+ super().__init__()
88
+ self.pred = nn.Sequential(
89
+ CNNBlock(in_channels, 2 * in_channels, kernel_size=3, padding=1),
90
+ CNNBlock(
91
+ 2 * in_channels, (num_classes + 5) * 3, bn_act=False, kernel_size=1
92
+ ),
93
+ )
94
+ self.num_classes = num_classes
95
+
96
+ def forward(self, x):
97
+ return (
98
+ self.pred(x)
99
+ .reshape(x.shape[0], 3, self.num_classes + 5, x.shape[2], x.shape[3])
100
+ .permute(0, 1, 3, 4, 2)
101
+ )
102
+
103
+
104
+ class YOLOv3(nn.Module):
105
+ def __init__(self, in_channels=3, num_classes=80):
106
+ super().__init__()
107
+ self.num_classes = num_classes
108
+ self.in_channels = in_channels
109
+ self.layers = self._create_conv_layers()
110
+
111
+ def forward(self, x):
112
+ outputs = [] # for each scale
113
+ route_connections = []
114
+ for layer in self.layers:
115
+ if isinstance(layer, ScalePrediction):
116
+ outputs.append(layer(x))
117
+ continue
118
+
119
+ x = layer(x)
120
+
121
+ if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
122
+ route_connections.append(x)
123
+
124
+ elif isinstance(layer, nn.Upsample):
125
+ x = torch.cat([x, route_connections[-1]], dim=1)
126
+ route_connections.pop()
127
+
128
+ return outputs
129
+
130
+ def _create_conv_layers(self):
131
+ layers = nn.ModuleList()
132
+ in_channels = self.in_channels
133
+
134
+ for module in config:
135
+ if isinstance(module, tuple):
136
+ out_channels, kernel_size, stride = module
137
+ layers.append(
138
+ CNNBlock(
139
+ in_channels,
140
+ out_channels,
141
+ kernel_size=kernel_size,
142
+ stride=stride,
143
+ padding=1 if kernel_size == 3 else 0,
144
+ )
145
+ )
146
+ in_channels = out_channels
147
+
148
+ elif isinstance(module, list):
149
+ num_repeats = module[1]
150
+ layers.append(ResidualBlock(in_channels, num_repeats=num_repeats,))
151
+
152
+ elif isinstance(module, str):
153
+ if module == "S":
154
+ layers += [
155
+ ResidualBlock(in_channels, use_residual=False, num_repeats=1),
156
+ CNNBlock(in_channels, in_channels // 2, kernel_size=1),
157
+ ScalePrediction(in_channels // 2, num_classes=self.num_classes),
158
+ ]
159
+ in_channels = in_channels // 2
160
+
161
+ elif module == "U":
162
+ layers.append(nn.Upsample(scale_factor=2),)
163
+ in_channels = in_channels * 3
164
+
165
+ return layers
166
+
167
+
168
+ if __name__ == "__main__":
169
+ num_classes = 20
170
+ IMAGE_SIZE = config.IMAGE_SIZE
171
+ model = YOLOv3(num_classes=num_classes)
172
+ x = torch.randn((2, 3, IMAGE_SIZE, IMAGE_SIZE))
173
+ out = model(x)
174
+ assert model(x)[0].shape == (2, 3, IMAGE_SIZE//32, IMAGE_SIZE//32, num_classes + 5)
175
+ assert model(x)[1].shape == (2, 3, IMAGE_SIZE//16, IMAGE_SIZE//16, num_classes + 5)
176
+ assert model(x)[2].shape == (2, 3, IMAGE_SIZE//8, IMAGE_SIZE//8, num_classes + 5)
177
+ print("Success!")
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ pillow
+ numpy
+ gradio
+ torchsummary
+ pytorch-lightning
+ grad-cam
+ torch-lr-finder
+ torch
+ torchvision
+ albumentations
+ lightning
utils.py ADDED
@@ -0,0 +1,707 @@
1
+ import config
2
+ import matplotlib.pyplot as plt
3
+ import matplotlib.patches as patches
4
+ import numpy as np
5
+ import os
6
+ import random
7
+ import torch
8
+ from typing import List
9
+ import cv2
10
+ from collections import Counter
11
+ from torch.utils.data import DataLoader
12
+ from tqdm import tqdm
13
+
14
+ from pytorch_grad_cam.base_cam import BaseCAM
15
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
16
+ from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
17
+
18
+
19
+ def iou_width_height(boxes1, boxes2):
20
+ """
21
+ Parameters:
22
+ boxes1 (tensor): width and height of the first bounding boxes
23
+ boxes2 (tensor): width and height of the second bounding boxes
24
+ Returns:
25
+ tensor: Intersection over union of the corresponding boxes
26
+ """
27
+ intersection = torch.min(boxes1[..., 0], boxes2[..., 0]) * torch.min(
28
+ boxes1[..., 1], boxes2[..., 1]
29
+ )
30
+ union = (
31
+ boxes1[..., 0] * boxes1[..., 1] + boxes2[..., 0] * boxes2[..., 1] - intersection
32
+ )
33
+ return intersection / union
34
+
35
+
36
+ def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
37
+ """
38
+ Video explanation of this function:
39
+ https://youtu.be/XXYG5ZWtjj0
40
+
41
+ This function calculates intersection over union (iou) given pred boxes
42
+ and target boxes.
43
+
44
+ Parameters:
45
+ boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
46
+ boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
47
+ box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
48
+
49
+ Returns:
50
+ tensor: Intersection over union for all examples
51
+ """
52
+
53
+ if box_format == "midpoint":
54
+ box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
55
+ box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
56
+ box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
57
+ box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
58
+ box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
59
+ box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
60
+ box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
61
+ box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
62
+
63
+ if box_format == "corners":
64
+ box1_x1 = boxes_preds[..., 0:1]
65
+ box1_y1 = boxes_preds[..., 1:2]
66
+ box1_x2 = boxes_preds[..., 2:3]
67
+ box1_y2 = boxes_preds[..., 3:4]
68
+ box2_x1 = boxes_labels[..., 0:1]
69
+ box2_y1 = boxes_labels[..., 1:2]
70
+ box2_x2 = boxes_labels[..., 2:3]
71
+ box2_y2 = boxes_labels[..., 3:4]
72
+
73
+ x1 = torch.max(box1_x1, box2_x1)
74
+ y1 = torch.max(box1_y1, box2_y1)
75
+ x2 = torch.min(box1_x2, box2_x2)
76
+ y2 = torch.min(box1_y2, box2_y2)
77
+
78
+ intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
79
+ box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
80
+ box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
81
+
82
+ return intersection / (box1_area + box2_area - intersection + 1e-6)
83
+
84
+
85
+ def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
86
+ """
87
+ Video explanation of this function:
88
+ https://youtu.be/YDkjWEN8jNA
89
+
90
+ Does Non Max Suppression given bboxes
91
+
92
+ Parameters:
93
+ bboxes (list): list of lists containing all bboxes with each bboxes
94
+ specified as [class_pred, prob_score, x1, y1, x2, y2]
95
+ iou_threshold (float): threshold where predicted bboxes is correct
96
+ threshold (float): threshold to remove predicted bboxes (independent of IoU)
97
+ box_format (str): "midpoint" or "corners" used to specify bboxes
98
+
99
+ Returns:
100
+ list: bboxes after performing NMS given a specific IoU threshold
101
+ """
102
+
103
+ assert type(bboxes) == list
104
+
105
+ bboxes = [box for box in bboxes if box[1] > threshold]
106
+ bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
107
+ bboxes_after_nms = []
108
+
109
+ while bboxes:
110
+ chosen_box = bboxes.pop(0)
111
+
112
+ bboxes = [
113
+ box
114
+ for box in bboxes
115
+ if box[0] != chosen_box[0]
116
+ or intersection_over_union(
117
+ torch.tensor(chosen_box[2:]),
118
+ torch.tensor(box[2:]),
119
+ box_format=box_format,
120
+ )
121
+ < iou_threshold
122
+ ]
123
+
124
+ bboxes_after_nms.append(chosen_box)
125
+
126
+ return bboxes_after_nms
127
+
128
+
129
+ def mean_average_precision(
130
+ pred_boxes, true_boxes, iou_threshold=0.5, box_format="midpoint", num_classes=20
131
+ ):
132
+ """
133
+ Video explanation of this function:
134
+ https://youtu.be/FppOzcDvaDI
135
+
136
+ This function calculates mean average precision (mAP)
137
+
138
+ Parameters:
139
+ pred_boxes (list): list of lists containing all bboxes with each bboxes
140
+ specified as [train_idx, class_prediction, prob_score, x1, y1, x2, y2]
141
+ true_boxes (list): Similar as pred_boxes except all the correct ones
142
+ iou_threshold (float): threshold where predicted bboxes is correct
143
+ box_format (str): "midpoint" or "corners" used to specify bboxes
144
+ num_classes (int): number of classes
145
+
146
+ Returns:
147
+ float: mAP value across all classes given a specific IoU threshold
148
+ """
149
+
150
+ # list storing all AP for respective classes
151
+ average_precisions = []
152
+
153
+ # used for numerical stability later on
154
+ epsilon = 1e-6
155
+
156
+ for c in range(num_classes):
157
+ detections = []
158
+ ground_truths = []
159
+
160
+ # Go through all predictions and targets,
161
+ # and only add the ones that belong to the
162
+ # current class c
163
+ for detection in pred_boxes:
164
+ if detection[1] == c:
165
+ detections.append(detection)
166
+
167
+ for true_box in true_boxes:
168
+ if true_box[1] == c:
169
+ ground_truths.append(true_box)
170
+
171
+ # find the amount of bboxes for each training example
172
+ # Counter here finds how many ground truth bboxes we get
173
+ # for each training example, so let's say img 0 has 3,
174
+ # img 1 has 5 then we will obtain a dictionary with:
175
+ # amount_bboxes = {0:3, 1:5}
176
+ amount_bboxes = Counter([gt[0] for gt in ground_truths])
177
+
178
+ # We then go through each key, val in this dictionary
179
+ # and convert to the following (w.r.t same example):
180
+ # ammount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
181
+ for key, val in amount_bboxes.items():
182
+ amount_bboxes[key] = torch.zeros(val)
183
+
184
+ # sort by box probabilities which is index 2
185
+ detections.sort(key=lambda x: x[2], reverse=True)
186
+ TP = torch.zeros((len(detections)))
187
+ FP = torch.zeros((len(detections)))
188
+ total_true_bboxes = len(ground_truths)
189
+
190
+ # If none exists for this class then we can safely skip
191
+ if total_true_bboxes == 0:
192
+ continue
193
+
194
+ for detection_idx, detection in enumerate(detections):
195
+ # Only take out the ground_truths that have the same
196
+ # training idx as detection
197
+ ground_truth_img = [
198
+ bbox for bbox in ground_truths if bbox[0] == detection[0]
199
+ ]
200
+
201
+ num_gts = len(ground_truth_img)
202
+ best_iou = 0
203
+
204
+ for idx, gt in enumerate(ground_truth_img):
205
+ iou = intersection_over_union(
206
+ torch.tensor(detection[3:]),
207
+ torch.tensor(gt[3:]),
208
+ box_format=box_format,
209
+ )
210
+
211
+ if iou > best_iou:
212
+ best_iou = iou
213
+ best_gt_idx = idx
214
+
215
+ if best_iou > iou_threshold:
216
+ # only detect ground truth detection once
217
+ if amount_bboxes[detection[0]][best_gt_idx] == 0:
218
+ # true positive and add this bounding box to seen
219
+ TP[detection_idx] = 1
220
+ amount_bboxes[detection[0]][best_gt_idx] = 1
221
+ else:
222
+ FP[detection_idx] = 1
223
+
224
+ # if IOU is lower then the detection is a false positive
225
+ else:
226
+ FP[detection_idx] = 1
227
+
228
+ TP_cumsum = torch.cumsum(TP, dim=0)
229
+ FP_cumsum = torch.cumsum(FP, dim=0)
230
+ recalls = TP_cumsum / (total_true_bboxes + epsilon)
231
+ precisions = TP_cumsum / (TP_cumsum + FP_cumsum + epsilon)
232
+ precisions = torch.cat((torch.tensor([1]), precisions))
233
+ recalls = torch.cat((torch.tensor([0]), recalls))
234
+ # torch.trapz for numerical integration
235
+ average_precisions.append(torch.trapz(precisions, recalls))
236
+
237
+ return sum(average_precisions) / len(average_precisions)
238
+
239
+
240
+ def plot_image(image, boxes):
241
+ """Plots predicted bounding boxes on the image"""
242
+ cmap = plt.get_cmap("tab20b")
243
+ class_labels = config.COCO_LABELS if config.DATASET=='COCO' else config.PASCAL_CLASSES
244
+ colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
245
+ im = np.array(image)
246
+ height, width, _ = im.shape
247
+
248
+ # Create figure and axes
249
+ fig, ax = plt.subplots(1)
250
+ # Display the image
251
+ ax.imshow(im)
252
+
253
+ # box[0] is x midpoint, box[2] is width
254
+ # box[1] is y midpoint, box[3] is height
255
+
256
+ # Create a Rectangle patch
257
+ for box in boxes:
258
+ assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
259
+ class_pred = box[0]
260
+ box = box[2:]
261
+ upper_left_x = box[0] - box[2] / 2
262
+ upper_left_y = box[1] - box[3] / 2
263
+ rect = patches.Rectangle(
264
+ (upper_left_x * width, upper_left_y * height),
265
+ box[2] * width,
266
+ box[3] * height,
267
+ linewidth=2,
268
+ edgecolor=colors[int(class_pred)],
269
+ facecolor="none",
270
+ )
271
+ # Add the patch to the Axes
272
+ ax.add_patch(rect)
273
+ plt.text(
274
+ upper_left_x * width,
275
+ upper_left_y * height,
276
+ s=class_labels[int(class_pred)],
277
+ color="white",
278
+ verticalalignment="top",
279
+ bbox={"color": colors[int(class_pred)], "pad": 0},
280
+ )
281
+
282
+ plt.show()
283
+
284
+
285
+ def get_evaluation_bboxes(
286
+ loader,
287
+ model,
288
+ iou_threshold,
289
+ anchors,
290
+ threshold,
291
+ box_format="midpoint",
292
+ device="cuda",
293
+ ):
294
+ # make sure model is in eval before get bboxes
295
+ model.eval()
296
+ train_idx = 0
297
+ all_pred_boxes = []
298
+ all_true_boxes = []
299
+ for batch_idx, (x, labels) in enumerate(tqdm(loader)):
300
+ x = x.to(device)
301
+
302
+ with torch.no_grad():
303
+ predictions = model(x)
304
+
305
+ batch_size = x.shape[0]
306
+ bboxes = [[] for _ in range(batch_size)]
307
+ for i in range(3):
308
+ S = predictions[i].shape[2]
309
+ anchor = torch.tensor([*anchors[i]]).to(device) * S
310
+ boxes_scale_i = cells_to_bboxes(
311
+ predictions[i], anchor, S=S, is_preds=True
312
+ )
313
+ for idx, (box) in enumerate(boxes_scale_i):
314
+ bboxes[idx] += box
315
+
316
+ # we just want one bbox for each label, not one for each scale
317
+ true_bboxes = cells_to_bboxes(
318
+ labels[2], anchor, S=S, is_preds=False
319
+ )
320
+
321
+ for idx in range(batch_size):
322
+ nms_boxes = non_max_suppression(
323
+ bboxes[idx],
324
+ iou_threshold=iou_threshold,
325
+ threshold=threshold,
326
+ box_format=box_format,
327
+ )
328
+
329
+ for nms_box in nms_boxes:
330
+ all_pred_boxes.append([train_idx] + nms_box)
331
+
332
+ for box in true_bboxes[idx]:
333
+ if box[1] > threshold:
334
+ all_true_boxes.append([train_idx] + box)
335
+
336
+ train_idx += 1
337
+
338
+ model.train()
339
+ return all_pred_boxes, all_true_boxes
340
+
341
+
342
+ def cells_to_bboxes(predictions, anchors, S, is_preds=True):
343
+ """
344
+ Scales the predictions coming from the model to
345
+ be relative to the entire image such that they for example later
346
+ can be plotted or evaluated.
347
+ INPUT:
348
+ predictions: tensor of size (N, 3, S, S, num_classes+5)
349
+ anchors: the anchors used for the predictions
350
+ S: the number of cells the image is divided in on the width (and height)
351
+ is_preds: whether the input is predictions or the true bounding boxes
352
+ OUTPUT:
353
+ converted_bboxes: the converted boxes of sizes (N, num_anchors, S, S, 1+5) with class index,
354
+ object score, bounding box coordinates
355
+ """
356
+ BATCH_SIZE = predictions.shape[0]
357
+ num_anchors = len(anchors)
358
+ box_predictions = predictions[..., 1:5]
359
+ if is_preds:
360
+ anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
361
+ box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
362
+ box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
363
+ scores = torch.sigmoid(predictions[..., 0:1])
364
+ best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
365
+ else:
366
+ scores = predictions[..., 0:1]
367
+ best_class = predictions[..., 5:6]
368
+
369
+ cell_indices = (
370
+ torch.arange(S)
371
+ .repeat(predictions.shape[0], 3, S, 1)
372
+ .unsqueeze(-1)
373
+ .to(predictions.device)
374
+ )
375
+ x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
376
+ y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
377
+ w_h = 1 / S * box_predictions[..., 2:4]
378
+ converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1).reshape(BATCH_SIZE, num_anchors * S * S, 6)
379
+ return converted_bboxes.tolist()
380
+
381
+ def check_class_accuracy(model, loader, threshold):
382
+ # model.eval()
383
+ tot_class_preds, correct_class = 0, 0
384
+ tot_noobj, correct_noobj = 0, 0
385
+ tot_obj, correct_obj = 0, 0
386
+
387
+ for idx, (x, y) in enumerate(tqdm(loader)):
388
+ x = x.to(config.DEVICE)
389
+ with torch.no_grad():
390
+ out = model(x)
391
+
392
+ for i in range(3):
393
+ y[i] = y[i].to(config.DEVICE)
394
+ obj = y[i][..., 0] == 1 # in paper this is Iobj_i
395
+ noobj = y[i][..., 0] == 0 # in paper this is Iobj_i
396
+
397
+ correct_class += torch.sum(
398
+ torch.argmax(out[i][..., 5:][obj], dim=-1) == y[i][..., 5][obj]
399
+ )
400
+ tot_class_preds += torch.sum(obj)
401
+
402
+ obj_preds = torch.sigmoid(out[i][..., 0]) > threshold
403
+ correct_obj += torch.sum(obj_preds[obj] == y[i][..., 0][obj])
404
+ tot_obj += torch.sum(obj)
405
+ correct_noobj += torch.sum(obj_preds[noobj] == y[i][..., 0][noobj])
406
+ tot_noobj += torch.sum(noobj)
407
+
408
+ # print(f"Class accuracy is: {(correct_class/(tot_class_preds+1e-16))*100:2f}%")
409
+ # print(f"No obj accuracy is: {(correct_noobj/(tot_noobj+1e-16))*100:2f}%")
410
+ # print(f"Obj accuracy is: {(correct_obj/(tot_obj+1e-16))*100:2f}%")
411
+ # model.train()
412
+ class_acc = (correct_class / (tot_class_preds + 1e-16)) * 100
413
+ no_obj_acc = (correct_noobj / (tot_noobj + 1e-16)) * 100
414
+ obj_acc = (correct_obj / (tot_obj + 1e-16)) * 100
415
+ return class_acc, no_obj_acc, obj_acc
416
+
417
+ def get_mean_std(loader):
418
+ # var[X] = E[X**2] - E[X]**2
419
+ channels_sum, channels_sqrd_sum, num_batches = 0, 0, 0
420
+
421
+ for data, _ in tqdm(loader):
422
+ channels_sum += torch.mean(data, dim=[0, 2, 3])
423
+ channels_sqrd_sum += torch.mean(data ** 2, dim=[0, 2, 3])
424
+ num_batches += 1
425
+
426
+ mean = channels_sum / num_batches
427
+ std = (channels_sqrd_sum / num_batches - mean ** 2) ** 0.5
428
+
429
+ return mean, std
430
+
431
+
432
+ def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"):
433
+ print("=> Saving checkpoint")
434
+ checkpoint = {
435
+ "state_dict": model.state_dict(),
436
+ "optimizer": optimizer.state_dict(),
437
+ }
438
+ torch.save(checkpoint, filename)
439
+
440
+
441
+ def load_checkpoint(checkpoint_file, model, optimizer, lr):
442
+ print("=> Loading checkpoint")
443
+ checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE)
444
+ model.load_state_dict(checkpoint["state_dict"])
445
+ optimizer.load_state_dict(checkpoint["optimizer"])
446
+
447
+ # If we don't do this then it will just have learning rate of old checkpoint
448
+ # and it will lead to many hours of debugging \:
449
+ for param_group in optimizer.param_groups:
450
+ param_group["lr"] = lr
451
+
452
+
453
+ def get_loaders(train_csv_path, test_csv_path):
454
+ from dataset import YOLOTrainDataset, YOLOTestDataset
455
+
456
+ IMAGE_SIZE = config.IMAGE_SIZE
457
+ train_dataset = YOLOTrainDataset(
458
+ train_csv_path,
459
+ transform=config.train_transforms,
460
+ S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
461
+ img_dir=config.IMG_DIR,
462
+ label_dir=config.LABEL_DIR,
463
+ anchors=config.ANCHORS,
464
+ )
465
+ test_dataset = YOLOTestDataset(
466
+ test_csv_path,
467
+ transform=config.test_transforms,
468
+ S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
469
+ img_dir=config.IMG_DIR,
470
+ label_dir=config.LABEL_DIR,
471
+ anchors=config.ANCHORS,
472
+ )
473
+ train_loader = DataLoader(
474
+ dataset=train_dataset,
475
+ batch_size=config.BATCH_SIZE,
476
+ num_workers=config.NUM_WORKERS,
477
+ pin_memory=config.PIN_MEMORY,
478
+ shuffle=True,
479
+ drop_last=False,
480
+ )
481
+ test_loader = DataLoader(
482
+ dataset=test_dataset,
483
+ batch_size=config.BATCH_SIZE,
484
+ num_workers=config.NUM_WORKERS,
485
+ pin_memory=config.PIN_MEMORY,
486
+ shuffle=False,
487
+ drop_last=False,
488
+ )
489
+
490
+ train_eval_dataset = YOLOTestDataset(
491
+ train_csv_path,
492
+ transform=config.test_transforms,
493
+ S=[IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8],
494
+ img_dir=config.IMG_DIR,
495
+ label_dir=config.LABEL_DIR,
496
+ anchors=config.ANCHORS,
497
+ )
498
+ train_eval_loader = DataLoader(
499
+ dataset=train_eval_dataset,
500
+ batch_size=config.BATCH_SIZE,
501
+ num_workers=config.NUM_WORKERS,
502
+ pin_memory=config.PIN_MEMORY,
503
+ shuffle=False,
504
+ drop_last=False,
505
+ )
506
+
507
+ return train_loader, test_loader, train_eval_loader
508
+
509
+ def plot_couple_examples(model, loader, thresh, iou_thresh, anchors):
510
+ model.eval()
511
+ x, y = next(iter(loader))
512
+ x = x.to(config.DEVICE)
513
+ with torch.no_grad():
514
+ out = model(x)
515
+ bboxes = [[] for _ in range(x.shape[0])]
516
+ for i in range(3):
517
+ batch_size, A, S, _, _ = out[i].shape
518
+ anchor = anchors[i]
519
+ boxes_scale_i = cells_to_bboxes(
520
+ out[i], anchor, S=S, is_preds=True
521
+ )
522
+ for idx, (box) in enumerate(boxes_scale_i):
523
+ bboxes[idx] += box
524
+
525
+ model.train()
526
+
527
+ for i in range(batch_size//4):
528
+ nms_boxes = non_max_suppression(
529
+ bboxes[i], iou_threshold=iou_thresh, threshold=thresh, box_format="midpoint",
530
+ )
531
+ plot_image(x[i].permute(1,2,0).detach().cpu(), nms_boxes)
532
+
533
+
534
+
535
+ # def seed_everything(seed=42):
536
+ # os.environ['PYTHONHASHSEED'] = str(seed)
537
+ # random.seed(seed)
538
+ # np.random.seed(seed)
539
+ # torch.manual_seed(seed)
540
+ # torch.cuda.manual_seed(seed)
541
+ # torch.cuda.manual_seed_all(seed)
542
+ # torch.backends.cudnn.deterministic = True
543
+ # torch.backends.cudnn.benchmark = False
544
+
545
+
546
+ def clip_coords(boxes, img_shape):
547
+ # Clip bounding xyxy bounding boxes to image shape (height, width)
548
+ boxes[:, 0].clamp_(0, img_shape[1]) # x1
549
+ boxes[:, 1].clamp_(0, img_shape[0]) # y1
550
+ boxes[:, 2].clamp_(0, img_shape[1]) # x2
551
+ boxes[:, 3].clamp_(0, img_shape[0]) # y2
552
+
553
+ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
554
+ # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
555
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
556
+ y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x
557
+ y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y
558
+ y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x
559
+ y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y
560
+ return y
561
+
562
+
563
+ def xyn2xy(x, w=640, h=640, padw=0, padh=0):
564
+ # Convert normalized segments into pixel segments, shape (n,2)
565
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
566
+ y[..., 0] = w * x[..., 0] + padw # top left x
567
+ y[..., 1] = h * x[..., 1] + padh # top left y
568
+ return y
569
+
570
+ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
571
+ # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
572
+ if clip:
573
+ clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip
574
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
575
+ y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center
576
+ y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center
577
+ y[..., 2] = (x[..., 2] - x[..., 0]) / w # width
578
+ y[..., 3] = (x[..., 3] - x[..., 1]) / h # height
579
+ return y
580
+
581
+ def clip_boxes(boxes, shape):
582
+ # Clip boxes (xyxy) to image shape (height, width)
583
+ if isinstance(boxes, torch.Tensor): # faster individually
584
+ boxes[..., 0].clamp_(0, shape[1]) # x1
585
+ boxes[..., 1].clamp_(0, shape[0]) # y1
586
+ boxes[..., 2].clamp_(0, shape[1]) # x2
587
+ boxes[..., 3].clamp_(0, shape[0]) # y2
588
+ else: # np.array (faster grouped)
589
+ boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2
590
+ boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2
591
+
592
+
593
+ def plot_image(image, boxes):
594
+ """Plots predicted bounding boxes on the image"""
595
+ cmap = plt.get_cmap("tab20b")
596
+ class_labels = config.COCO_LABELS if config.DATASET=='COCO' else config.PASCAL_CLASSES
597
+ colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
598
+ im = np.array(image)
599
+ height, width, _ = im.shape
600
+
601
+ # Create figure and axes
602
+ fig, ax = plt.subplots(1)
603
+ # Display the image
604
+ ax.imshow(im)
605
+
606
+ # box[0] is x midpoint, box[2] is width
607
+ # box[1] is y midpoint, box[3] is height
608
+
609
+ # Create a Rectangle patch
610
+ for box in boxes:
611
+ assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
612
+ class_pred = box[0]
613
+ box = box[2:]
614
+ upper_left_x = box[0] - box[2] / 2
615
+ upper_left_y = box[1] - box[3] / 2
616
+ rect = patches.Rectangle(
617
+ (upper_left_x * width, upper_left_y * height),
618
+ box[2] * width,
619
+ box[3] * height,
620
+ linewidth=2,
621
+ edgecolor=colors[int(class_pred)],
622
+ facecolor="none",
623
+ )
624
+ # Add the patch to the Axes
625
+ ax.add_patch(rect)
626
+ plt.text(
627
+ upper_left_x * width,
628
+ upper_left_y * height,
629
+ s=class_labels[int(class_pred)],
630
+ color="white",
631
+ verticalalignment="top",
632
+ bbox={"color": colors[int(class_pred)], "pad": 0},
633
+ )
634
+
635
+ plt.show()
636
+ return fig
637
+
638
+ class YoloCAM(BaseCAM):
639
+ def __init__(self, model, target_layers, use_cuda=False,
640
+ reshape_transform=None):
641
+ super(YoloCAM, self).__init__(model,
642
+ target_layers,
643
+ use_cuda,
644
+ reshape_transform,
645
+ uses_gradients=False)
646
+
647
+ def forward(self,
648
+ input_tensor: torch.Tensor,
649
+ scaled_anchors: torch.Tensor,
650
+ targets: List[torch.nn.Module],
651
+ eigen_smooth: bool = False) -> np.ndarray:
652
+
653
+ if self.cuda:
654
+ input_tensor = input_tensor.cuda()
655
+
656
+ if self.compute_input_gradient:
657
+ input_tensor = torch.autograd.Variable(input_tensor,
658
+ requires_grad=True)
659
+
660
+ outputs = self.activations_and_grads(input_tensor)
661
+ if targets is None:
662
+ bboxes = [[] for _ in range(1)]
663
+ for i in range(3):
664
+ batch_size, A, S, _, _ = outputs[i].shape
665
+ anchor = scaled_anchors[i]
666
+ boxes_scale_i = cells_to_bboxes(
667
+ outputs[i], anchor, S=S, is_preds=True
668
+ )
669
+ for idx, (box) in enumerate(boxes_scale_i):
670
+ bboxes[idx] += box
671
+
672
+ nms_boxes = non_max_suppression(
673
+ bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
674
+ )
675
+ # target_categories = np.argmax(outputs.cpu().data.numpy(), axis=-1)
676
+ target_categories = [box[0] for box in nms_boxes]
677
+ targets = [ClassifierOutputTarget(
678
+ category) for category in target_categories]
679
+
680
+ if self.uses_gradients:
681
+ self.model.zero_grad()
682
+ loss = sum([target(output)
683
+ for target, output in zip(targets, outputs)])
684
+ loss.backward(retain_graph=True)
685
+
686
+ # In most of the saliency attribution papers, the saliency is
687
+ # computed with a single target layer.
688
+ # Commonly it is the last convolutional layer.
689
+ # Here we support passing a list with multiple target layers.
690
+ # It will compute the saliency image for every image,
691
+ # and then aggregate them (with a default mean aggregation).
692
+ # This gives you more flexibility in case you just want to
693
+ # use all conv layers for example, all Batchnorm layers,
694
+ # or something else.
695
+ cam_per_layer = self.compute_cam_per_layer(input_tensor,
696
+ targets,
697
+ eigen_smooth)
698
+ return self.aggregate_multi_layers(cam_per_layer)
699
+
700
+ def get_cam_image(self,
701
+ input_tensor,
702
+ target_layer,
703
+ target_category,
704
+ activations,
705
+ grads,
706
+ eigen_smooth):
707
+ return get_2d_projection(activations)
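To see the NMS behaviour defined above in isolation, here is a small self-contained toy example; the boxes are made up for illustration.

# Sketch: non_max_suppression on toy midpoint-format boxes [class, score, x, y, w, h].
from utils import non_max_suppression

boxes = [
    [0, 0.9, 0.50, 0.50, 0.40, 0.40],   # strong "class 0" detection
    [0, 0.6, 0.52, 0.51, 0.40, 0.40],   # near-duplicate of the same object -> suppressed
    [1, 0.8, 0.20, 0.20, 0.10, 0.10],   # different class, kept regardless of overlap
    [0, 0.3, 0.80, 0.80, 0.10, 0.10],   # below the score threshold -> dropped
]
kept = non_max_suppression(boxes, iou_threshold=0.5, threshold=0.4, box_format="midpoint")
print(kept)   # the 0.9 class-0 box and the 0.8 class-1 box remain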
utils_app.py ADDED
@@ -0,0 +1,246 @@
1
+ from typing import List
2
+ import torch
3
+ import numpy as np
4
+ import cv2
5
+ import random
6
+ import matplotlib.pyplot as plt
7
+ import matplotlib.patches as patches
8
+ from pytorch_grad_cam.base_cam import BaseCAM
9
+ from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
10
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
11
+ import config
12
+
13
+ def cells_to_bboxes(predictions, anchors, S, is_preds=True):
14
+ """
15
+ Scales the predictions coming from the model to
16
+ be relative to the entire image such that they for example later
17
+ can be plotted or.
18
+ INPUT:
19
+ predictions: tensor of size (N, 3, S, S, num_classes+5)
20
+ anchors: the anchors used for the predictions
21
+ S: the number of cells the image is divided in on the width (and height)
22
+ is_preds: whether the input is predictions or the true bounding boxes
23
+ OUTPUT:
24
+ converted_bboxes: the converted boxes of sizes (N, num_anchors, S, S, 1+5) with class index,
25
+ object score, bounding box coordinates
26
+ """
27
+ BATCH_SIZE = predictions.shape[0]
28
+ num_anchors = len(anchors)
29
+ box_predictions = predictions[..., 1:5]
30
+ if is_preds:
31
+ anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
32
+ box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
33
+ box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
34
+ scores = torch.sigmoid(predictions[..., 0:1])
35
+ best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
36
+ else:
37
+ scores = predictions[..., 0:1]
38
+ best_class = predictions[..., 5:6]
39
+
40
+ cell_indices = (
41
+ torch.arange(S)
42
+ .repeat(predictions.shape[0], 3, S, 1)
43
+ .unsqueeze(-1)
44
+ .to(predictions.device)
45
+ )
46
+ x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
47
+ y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
48
+ w_h = 1 / S * box_predictions[..., 2:4]
49
+ converted_bboxes = torch.cat((best_class, scores, x, y, w_h), dim=-1).reshape(BATCH_SIZE, num_anchors * S * S, 6)
50
+ return converted_bboxes.tolist()
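A minimal usage sketch for cells_to_bboxes; the tensor shape and anchor values below are illustrative only, assuming the coarsest 13x13 scale and 20 Pascal VOC classes (so 5 + 20 = 25 channels per anchor):

import torch
from utils_app import cells_to_bboxes

preds = torch.randn(1, 3, 13, 13, 25)                           # (N, anchors, S, S, 5 + num_classes)
anchors = torch.tensor([(3.6, 2.9), (4.9, 6.2), (11.7, 10.1)])  # hypothetical anchors for this scale, already multiplied by S
boxes = cells_to_bboxes(preds, anchors, S=13, is_preds=True)
# One list per image, each with 3 * 13 * 13 = 507 entries of
# [class_index, objectness, x_center, y_center, width, height] relative to the whole image.
print(len(boxes), len(boxes[0]), len(boxes[0][0]))              # 1 507 6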
+
+
+
+ def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
+     """
+     Video explanation of this function:
+     https://youtu.be/XXYG5ZWtjj0
+     This function calculates intersection over union (iou) given pred boxes
+     and target boxes.
+     Parameters:
+         boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
+         boxes_labels (tensor): Correct labels of Bounding Boxes (BATCH_SIZE, 4)
+         box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
+     Returns:
+         tensor: Intersection over union for all examples
+     """
+
+     if box_format == "midpoint":
+         box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2
+         box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2
+         box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2
+         box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2
+         box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
+         box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
+         box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
+         box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
+
+     if box_format == "corners":
+         box1_x1 = boxes_preds[..., 0:1]
+         box1_y1 = boxes_preds[..., 1:2]
+         box1_x2 = boxes_preds[..., 2:3]
+         box1_y2 = boxes_preds[..., 3:4]
+         box2_x1 = boxes_labels[..., 0:1]
+         box2_y1 = boxes_labels[..., 1:2]
+         box2_x2 = boxes_labels[..., 2:3]
+         box2_y2 = boxes_labels[..., 3:4]
+
+     x1 = torch.max(box1_x1, box2_x1)
+     y1 = torch.max(box1_y1, box2_y1)
+     x2 = torch.min(box1_x2, box2_x2)
+     y2 = torch.min(box1_y2, box2_y2)
+
+     intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
+     box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
+     box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
+
+     return intersection / (box1_area + box2_area - intersection + 1e-6)
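Worked example: two midpoint-format boxes sharing the same centre, 0.4 x 0.4 and 0.2 x 0.2, intersect in exactly the smaller box, so IoU = 0.04 / (0.16 + 0.04 - 0.04) = 0.25:

import torch
from utils_app import intersection_over_union

box_a = torch.tensor([[0.5, 0.5, 0.4, 0.4]])  # (x_center, y_center, w, h)
box_b = torch.tensor([[0.5, 0.5, 0.2, 0.2]])
print(intersection_over_union(box_a, box_b, box_format="midpoint"))  # ~tensor([[0.2500]])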
+
+ def non_max_suppression(bboxes, iou_threshold, threshold, box_format="corners"):
+     """
+     Video explanation of this function:
+     https://youtu.be/YDkjWEN8jNA
+     Does Non Max Suppression given bboxes
+     Parameters:
+         bboxes (list): list of lists containing all bboxes, with each bbox
+         specified as [class_pred, prob_score, x1, y1, x2, y2]
+         iou_threshold (float): IoU above which two boxes of the same class are
+         considered duplicates, so only the higher-scoring one is kept
+         threshold (float): confidence score below which predicted bboxes are
+         removed before NMS (independent of IoU)
+         box_format (str): "midpoint" or "corners" used to specify bboxes
+     Returns:
+         list: bboxes after performing NMS given a specific IoU threshold
+     """
+
+     assert type(bboxes) == list
+
+     bboxes = [box for box in bboxes if box[1] > threshold]
+     bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
+     bboxes_after_nms = []
+
+     while bboxes:
+         chosen_box = bboxes.pop(0)
+
+         bboxes = [
+             box
+             for box in bboxes
+             if box[0] != chosen_box[0]
+             or intersection_over_union(
+                 torch.tensor(chosen_box[2:]),
+                 torch.tensor(box[2:]),
+                 box_format=box_format,
+             )
+             < iou_threshold
+         ]
+
+         bboxes_after_nms.append(chosen_box)
+
+     return bboxes_after_nms
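A quick illustration with hypothetical midpoint-format boxes of the form [class, score, x, y, w, h]: the low-confidence box is dropped by the score threshold, the heavily overlapping duplicate of class 0 is suppressed, and boxes of other classes are left untouched:

from utils_app import non_max_suppression

boxes = [
    [0, 0.9, 0.50, 0.5, 0.4, 0.4],  # kept: highest-scoring class-0 box
    [0, 0.8, 0.52, 0.5, 0.4, 0.4],  # suppressed: IoU with the box above is ~0.9 > 0.5
    [0, 0.3, 0.50, 0.5, 0.4, 0.4],  # removed first: score 0.3 < threshold 0.4
    [1, 0.7, 0.10, 0.1, 0.1, 0.1],  # kept: different class, so never compared
]
print(non_max_suppression(boxes, iou_threshold=0.5, threshold=0.4, box_format="midpoint"))
# -> [[0, 0.9, 0.5, 0.5, 0.4, 0.4], [1, 0.7, 0.1, 0.1, 0.1, 0.1]]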
+
+ def plot_image(image, boxes):
+     """Plots predicted bounding boxes on the image"""
+     cmap = plt.get_cmap("tab20b")
+     class_labels = config.COCO_LABELS if config.DATASET == 'COCO' else config.PASCAL_CLASSES
+     colors = [cmap(i) for i in np.linspace(0, 1, len(class_labels))]
+     im = np.array(image)
+     height, width, _ = im.shape
+
+     # Create figure and axes
+     fig, ax = plt.subplots(1)
+     # Display the image
+     ax.imshow(im)
+
+     # box[0] is x midpoint, box[2] is width
+     # box[1] is y midpoint, box[3] is height
+
+     # Create a Rectangle patch
+     for box in boxes:
+         assert len(box) == 6, "box should contain class pred, confidence, x, y, width, height"
+         class_pred = box[0]
+         box = box[2:]
+         upper_left_x = box[0] - box[2] / 2
+         upper_left_y = box[1] - box[3] / 2
+         rect = patches.Rectangle(
+             (upper_left_x * width, upper_left_y * height),
+             box[2] * width,
+             box[3] * height,
+             linewidth=2,
+             edgecolor=colors[int(class_pred)],
+             facecolor="none",
+         )
+         # Add the patch to the Axes
+         ax.add_patch(rect)
+         plt.text(
+             upper_left_x * width,
+             upper_left_y * height,
+             s=class_labels[int(class_pred)],
+             color="white",
+             verticalalignment="top",
+             bbox={"color": colors[int(class_pred)], "pad": 0},
+         )
+
+     plt.show()
+     return fig
+
+
+
+ class YoloCAM(BaseCAM):
+     def __init__(self, model, target_layers, use_cuda=False,
+                  reshape_transform=None):
+         super(YoloCAM, self).__init__(model,
+                                       target_layers,
+                                       use_cuda,
+                                       reshape_transform,
+                                       uses_gradients=False)
+
+     def forward(self,
+                 input_tensor: torch.Tensor,
+                 scaled_anchors: torch.Tensor,
+                 targets: List[torch.nn.Module],
+                 eigen_smooth: bool = False) -> np.ndarray:
+
+         if self.cuda:
+             input_tensor = input_tensor.cuda()
+
+         if self.compute_input_gradient:
+             input_tensor = torch.autograd.Variable(input_tensor,
+                                                    requires_grad=True)
+
+         outputs = self.activations_and_grads(input_tensor)
+         if targets is None:
+             bboxes = [[] for _ in range(1)]
+             for i in range(3):
+                 batch_size, A, S, _, _ = outputs[i].shape
+                 anchor = scaled_anchors[i]
+                 boxes_scale_i = cells_to_bboxes(
+                     outputs[i], anchor, S=S, is_preds=True
+                 )
+                 for idx, box in enumerate(boxes_scale_i):
+                     bboxes[idx] += box
+
+             nms_boxes = non_max_suppression(
+                 bboxes[0], iou_threshold=0.5, threshold=0.4, box_format="midpoint",
+             )
+             # target_categories = np.argmax(outputs.cpu().data.numpy(), axis=-1)
+             target_categories = [box[0] for box in nms_boxes]
+             targets = [ClassifierOutputTarget(
+                 category) for category in target_categories]
+
+         if self.uses_gradients:
+             self.model.zero_grad()
+             loss = sum([target(output)
+                         for target, output in zip(targets, outputs)])
+             loss.backward(retain_graph=True)
+
+         cam_per_layer = self.compute_cam_per_layer(input_tensor,
+                                                    targets,
+                                                    eigen_smooth)
+         return self.aggregate_multi_layers(cam_per_layer)
+
+     def get_cam_image(self,
+                       input_tensor,
+                       target_layer,
+                       target_category,
+                       activations,
+                       grads,
+                       eigen_smooth):
+         return get_2d_projection(activations)
yolo_lightning.py ADDED
@@ -0,0 +1,106 @@
+ import torch
+ import torch.optim as optim
+ import lightning.pytorch as pl
+ from tqdm import tqdm
+ from model import YOLOv3
+ from loss import YoloLoss
+ from utils import get_loaders, load_checkpoint, check_class_accuracy, intersection_over_union
+ import config
+ from torch.optim.lr_scheduler import OneCycleLR
+
+
+ class YOLOv3Lightning(pl.LightningModule):
+     def __init__(self, config, lr_value=0):
+         super().__init__()
+         self.automatic_optimization = True
+         self.config = config
+         self.model = YOLOv3(num_classes=self.config.NUM_CLASSES)
+         self.loss_fn = YoloLoss()
+
+         if lr_value == 0:
+             self.learning_rate = self.config.LEARNING_RATE
+         else:
+             self.learning_rate = lr_value
+
+     def forward(self, x):
+         return self.model(x)
+
+     def configure_optimizers(self):
+         # Use self.learning_rate so that an lr_value passed to the constructor (e.g. from an LR finder) takes effect.
+         optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=self.config.WEIGHT_DECAY)
+         EPOCHS = self.config.NUM_EPOCHS * 2 // 5
+         scheduler = OneCycleLR(optimizer, max_lr=1E-3, steps_per_epoch=len(self.train_dataloader()), epochs=EPOCHS, pct_start=5/EPOCHS, div_factor=100, three_phase=False, final_div_factor=100, anneal_strategy='linear')
+         return [optimizer], [{"scheduler": scheduler, "interval": "step", "frequency": 1}]
+
+     def train_dataloader(self):
+         train_loader, _, _ = get_loaders(
+             train_csv_path=self.config.DATASET + "/train.csv",
+             test_csv_path=self.config.DATASET + "/test.csv",
+         )
+         return train_loader
+
+     def training_step(self, batch, batch_idx):
+         x, y = batch
+         y0, y1, y2 = (y[0].to(self.device), y[1].to(self.device), y[2].to(self.device))
+         out = self(x)
+
+         loss = (self.loss_fn(out[0], y0, self.scaled_anchors[0])
+                 + self.loss_fn(out[1], y1, self.scaled_anchors[1])
+                 + self.loss_fn(out[2], y2, self.scaled_anchors[2]))
+
+         self.log('train_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
+         return loss
+
+     def val_dataloader(self):
+         _, _, val_loader = get_loaders(
+             train_csv_path=self.config.DATASET + "/train.csv",
+             test_csv_path=self.config.DATASET + "/test.csv",
+         )
+
+         return val_loader
+
+     def validation_step(self, batch, batch_idx):
+         x, y = batch
+         y0, y1, y2 = (
+             y[0].to(self.device),
+             y[1].to(self.device),
+             y[2].to(self.device),
+         )
+         out = self(x)
+         loss = (
+             self.loss_fn(out[0], y0, self.scaled_anchors[0])
+             + self.loss_fn(out[1], y1, self.scaled_anchors[1])
+             + self.loss_fn(out[2], y2, self.scaled_anchors[2])
+         )
+
+         self.log('val_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
+
+
+     def test_dataloader(self):
+         _, test_loader, _ = get_loaders(
+             train_csv_path=self.config.DATASET + "/train.csv",
+             test_csv_path=self.config.DATASET + "/test.csv",
+         )
+         return test_loader
+
+     def test_step(self, batch, batch_idx):
+         x, y = batch
+         y0, y1, y2 = (
+             y[0].to(self.device),
+             y[1].to(self.device),
+             y[2].to(self.device),
+         )
+         out = self(x)
+         loss = (
+             self.loss_fn(out[0], y0, self.scaled_anchors[0])
+             + self.loss_fn(out[1], y1, self.scaled_anchors[1])
+             + self.loss_fn(out[2], y2, self.scaled_anchors[2])
+         )
+         self.log('test_loss', loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
+
+     def on_train_start(self):
+         if self.config.LOAD_MODEL:
+             load_checkpoint(self.config.CHECKPOINT_FILE, self.model, self.optimizers(), self.config.LEARNING_RATE)
+         self.scaled_anchors = (
+             torch.tensor(self.config.ANCHORS)
+             * torch.tensor(self.config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
+         ).to(self.device)
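A minimal sketch of how this LightningModule could be trained (standard Lightning 2.x API; the Trainer flags below are illustrative placeholders, not taken from this commit):

import lightning.pytorch as pl
import config
from yolo_lightning import YOLOv3Lightning

model = YOLOv3Lightning(config)
trainer = pl.Trainer(
    max_epochs=config.NUM_EPOCHS,  # config.NUM_EPOCHS is referenced in configure_optimizers above
    accelerator="auto",
    num_sanity_val_steps=0,        # may be needed, since self.scaled_anchors is only created in on_train_start
)
trainer.fit(model)                 # train/val dataloaders come from the module's *_dataloader hooks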