kisa-misa committed on
Commit 7b798bf
1 Parent(s): f8d44ec

Upload 5 files

Files changed (5)
  1. __init__.py +5 -0
  2. predict.log +14 -0
  3. train.py +217 -0
  4. val.py +241 -0
  5. yolov8n.pt +3 -0
__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Ultralytics YOLO 🚀, GPL-3.0 license
+
+ from .predict import DetectionPredictor, predict
+ from .train import DetectionTrainer, train
+ from .val import DetectionValidator, val
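
Note: the five lines above are the whole public surface of this package. A minimal consumption sketch, assuming the folder is importable as ultralytics.yolo.v8.detect (the path used by train.py's own docstring below):

  from ultralytics.yolo.v8.detect import DetectionPredictor, DetectionTrainer, DetectionValidator
  from ultralytics.yolo.v8.detect import predict, train, val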
predict.log ADDED
@@ -0,0 +1,14 @@
+ [2023-05-04 16:39:12,864][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 16:40:14,916][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 16:55:53,406][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 16:56:51,592][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 16:59:18,102][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:00:33,437][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:11:10,886][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:16:02,219][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:16:13,544][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:31:26,624][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:37:02,618][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:53:48,544][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 17:54:46,340][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
+ [2023-05-04 18:11:37,927][root.tracker][INFO] - Loading weights from deep_sort_pytorch/deep_sort/deep/checkpoint/ckpt.t7... Done!
train.py ADDED
@@ -0,0 +1,217 @@
+ # Ultralytics YOLO 🚀, GPL-3.0 license
+
+ from copy import copy
+
+ import hydra
+ import torch
+ import torch.nn as nn
+
+ from ultralytics.nn.tasks import DetectionModel
+ from ultralytics.yolo import v8
+ from ultralytics.yolo.data import build_dataloader
+ from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
+ from ultralytics.yolo.engine.trainer import BaseTrainer
+ from ultralytics.yolo.utils import DEFAULT_CONFIG, colorstr
+ from ultralytics.yolo.utils.loss import BboxLoss
+ from ultralytics.yolo.utils.ops import xywh2xyxy
+ from ultralytics.yolo.utils.plotting import plot_images, plot_results
+ from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
+ from ultralytics.yolo.utils.torch_utils import de_parallel
+
+
+ # BaseTrainer python usage
+ class DetectionTrainer(BaseTrainer):
+
+     def get_dataloader(self, dataset_path, batch_size, mode="train", rank=0):
+         # TODO: manage splits differently
+         # calculate stride - check if model is initialized
+         gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
+         return create_dataloader(path=dataset_path,
+                                  imgsz=self.args.imgsz,
+                                  batch_size=batch_size,
+                                  stride=gs,
+                                  hyp=dict(self.args),
+                                  augment=mode == "train",
+                                  cache=self.args.cache,
+                                  pad=0 if mode == "train" else 0.5,
+                                  rect=self.args.rect,
+                                  rank=rank,
+                                  workers=self.args.workers,
+                                  close_mosaic=self.args.close_mosaic != 0,
+                                  prefix=colorstr(f'{mode}: '),
+                                  shuffle=mode == "train",
+                                  seed=self.args.seed)[0] if self.args.v5loader else \
+             build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, rank=rank, mode=mode)[0]
+
+     def preprocess_batch(self, batch):
+         batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255
+         return batch
+
+     def set_model_attributes(self):
+         nl = de_parallel(self.model).model[-1].nl  # number of detection layers (to scale hyps)
+         self.args.box *= 3 / nl  # scale to layers
+         # self.args.cls *= self.data["nc"] / 80 * 3 / nl  # scale to classes and layers
+         self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
+         self.model.nc = self.data["nc"]  # attach number of classes to model
+         self.model.args = self.args  # attach hyperparameters to model
+         # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
+         self.model.names = self.data["names"]
+
+     def get_model(self, cfg=None, weights=None, verbose=True):
+         model = DetectionModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose)
+         if weights:
+             model.load(weights)
+
+         return model
+
+     def get_validator(self):
+         self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss'
+         return v8.detect.DetectionValidator(self.test_loader,
+                                             save_dir=self.save_dir,
+                                             logger=self.console,
+                                             args=copy(self.args))
+
+     def criterion(self, preds, batch):
+         if not hasattr(self, 'compute_loss'):
+             self.compute_loss = Loss(de_parallel(self.model))
+         return self.compute_loss(preds, batch)
+
+     def label_loss_items(self, loss_items=None, prefix="train"):
+         """
+         Returns a loss dict with labelled training loss items tensor
+         """
+         # Not needed for classification but necessary for segmentation & detection
+         keys = [f"{prefix}/{x}" for x in self.loss_names]
+         if loss_items is not None:
+             loss_items = [round(float(x), 5) for x in loss_items]  # convert tensors to 5 decimal place floats
+             return dict(zip(keys, loss_items))
+         else:
+             return keys
+
+     def progress_string(self):
+         return ('\n' + '%11s' *
+                 (4 + len(self.loss_names))) % ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')
+
+     def plot_training_samples(self, batch, ni):
+         plot_images(images=batch["img"],
+                     batch_idx=batch["batch_idx"],
+                     cls=batch["cls"].squeeze(-1),
+                     bboxes=batch["bboxes"],
+                     paths=batch["im_file"],
+                     fname=self.save_dir / f"train_batch{ni}.jpg")
+
+     def plot_metrics(self):
+         plot_results(file=self.csv)  # save results.png
+
+
+ # Criterion class for computing training losses
+ class Loss:
+
+     def __init__(self, model):  # model must be de-paralleled
+
+         device = next(model.parameters()).device  # get model device
+         h = model.args  # hyperparameters
+
+         m = model.model[-1]  # Detect() module
+         self.bce = nn.BCEWithLogitsLoss(reduction='none')
+         self.hyp = h
+         self.stride = m.stride  # model strides
+         self.nc = m.nc  # number of classes
+         self.no = m.no
+         self.reg_max = m.reg_max
+         self.device = device
+
+         self.use_dfl = m.reg_max > 1
+         self.assigner = TaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0)
+         self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=self.use_dfl).to(device)
+         self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device)
+
+     def preprocess(self, targets, batch_size, scale_tensor):
+         if targets.shape[0] == 0:
+             out = torch.zeros(batch_size, 0, 5, device=self.device)
+         else:
+             i = targets[:, 0]  # image index
+             _, counts = i.unique(return_counts=True)
+             out = torch.zeros(batch_size, counts.max(), 5, device=self.device)
+             for j in range(batch_size):
+                 matches = i == j
+                 n = matches.sum()
+                 if n:
+                     out[j, :n] = targets[matches, 1:]
+             out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor))
+         return out
+
+     def bbox_decode(self, anchor_points, pred_dist):
+         if self.use_dfl:
+             b, a, c = pred_dist.shape  # batch, anchors, channels
+             pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype))
+             # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype))
+             # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2)
+         return dist2bbox(pred_dist, anchor_points, xywh=False)
+
+     def __call__(self, preds, batch):
+         loss = torch.zeros(3, device=self.device)  # box, cls, dfl
+         feats = preds[1] if isinstance(preds, tuple) else preds
+         pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
+             (self.reg_max * 4, self.nc), 1)
+
+         pred_scores = pred_scores.permute(0, 2, 1).contiguous()
+         pred_distri = pred_distri.permute(0, 2, 1).contiguous()
+
+         dtype = pred_scores.dtype
+         batch_size = pred_scores.shape[0]
+         imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
+         anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)
+
+         # targets
+         targets = torch.cat((batch["batch_idx"].view(-1, 1), batch["cls"].view(-1, 1), batch["bboxes"]), 1)
+         targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
+         gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
+         mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+
+         # pboxes
+         pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
+
+         _, target_bboxes, target_scores, fg_mask, _ = self.assigner(
+             pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
+             anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt)
+
+         target_bboxes /= stride_tensor
+         target_scores_sum = target_scores.sum()
+
+         # cls loss
+         # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
+         loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE
+
+         # bbox loss
+         if fg_mask.sum():
+             loss[0], loss[2] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores,
+                                               target_scores_sum, fg_mask)
+
+         loss[0] *= self.hyp.box  # box gain
+         loss[1] *= self.hyp.cls  # cls gain
+         loss[2] *= self.hyp.dfl  # dfl gain
+
+         return loss.sum() * batch_size, loss.detach()  # loss(box, cls, dfl)
+
+
+ @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
+ def train(cfg):
+     cfg.model = cfg.model or "yolov8n.yaml"
+     cfg.data = cfg.data or "coco128.yaml"  # or yolo.ClassificationDataset("mnist")
+     # trainer = DetectionTrainer(cfg)
+     # trainer.train()
+     from ultralytics import YOLO
+     model = YOLO(cfg.model)
+     model.train(**cfg)
+
+
+ if __name__ == "__main__":
+     """
+     CLI usage:
+     python ultralytics/yolo/v8/detect/train.py model=yolov8n.yaml data=coco128 epochs=100 imgsz=640
+
+     TODO:
+     yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100
+     """
+     train()
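
Note: train() above defers to the YOLO wrapper class rather than constructing DetectionTrainer directly (the direct path is left commented out). A minimal programmatic sketch that mirrors it, with override names taken from the CLI example in the docstring:

  from ultralytics import YOLO

  model = YOLO("yolov8n.yaml")  # build a fresh detection model from config, as train() does
  model.train(data="coco128.yaml", epochs=100, imgsz=640)  # same overrides as the CLI usage above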
val.py ADDED
@@ -0,0 +1,241 @@
+ # Ultralytics YOLO 🚀, GPL-3.0 license
+
+ import os
+ from pathlib import Path
+
+ import hydra
+ import numpy as np
+ import torch
+
+ from ultralytics.yolo.data import build_dataloader
+ from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
+ from ultralytics.yolo.engine.validator import BaseValidator
+ from ultralytics.yolo.utils import DEFAULT_CONFIG, colorstr, ops, yaml_load
+ from ultralytics.yolo.utils.checks import check_file, check_requirements
+ from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
+ from ultralytics.yolo.utils.plotting import output_to_target, plot_images
+ from ultralytics.yolo.utils.torch_utils import de_parallel
+
+
+ class DetectionValidator(BaseValidator):
+
+     def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
+         super().__init__(dataloader, save_dir, pbar, logger, args)
+         self.data_dict = yaml_load(check_file(self.args.data), append_filename=True) if self.args.data else None
+         self.is_coco = False
+         self.class_map = None
+         self.metrics = DetMetrics(save_dir=self.save_dir, plot=self.args.plots)
+         self.iouv = torch.linspace(0.5, 0.95, 10)  # iou vector for mAP@0.5:0.95
+         self.niou = self.iouv.numel()
+
+     def preprocess(self, batch):
+         batch["img"] = batch["img"].to(self.device, non_blocking=True)
+         batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255
+         for k in ["batch_idx", "cls", "bboxes"]:
+             batch[k] = batch[k].to(self.device)
+
+         nb, _, height, width = batch["img"].shape
+         batch["bboxes"] *= torch.tensor((width, height, width, height), device=self.device)  # to pixels
+         self.lb = [torch.cat([batch["cls"], batch["bboxes"]], dim=-1)[batch["batch_idx"] == i]
+                    for i in range(nb)] if self.args.save_hybrid else []  # for autolabelling
+
+         return batch
+
+     def init_metrics(self, model):
+         head = model.model[-1] if self.training else model.model.model[-1]
+         val = self.data.get('val', '')  # validation path
+         self.is_coco = isinstance(val, str) and val.endswith(f'coco{os.sep}val2017.txt')  # is COCO dataset
+         self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000))
+         self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
+         self.nc = head.nc
+         self.names = model.names
+         self.metrics.names = self.names
+         self.confusion_matrix = ConfusionMatrix(nc=self.nc)
+         self.seen = 0
+         self.jdict = []
+         self.stats = []
+
+     def get_desc(self):
+         return ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)")
+
+     def postprocess(self, preds):
+         preds = ops.non_max_suppression(preds,
+                                         self.args.conf,
+                                         self.args.iou,
+                                         labels=self.lb,
+                                         multi_label=True,
+                                         agnostic=self.args.single_cls,
+                                         max_det=self.args.max_det)
+         return preds
+
+     def update_metrics(self, preds, batch):
+         # Metrics
+         for si, pred in enumerate(preds):
+             idx = batch["batch_idx"] == si
+             cls = batch["cls"][idx]
+             bbox = batch["bboxes"][idx]
+             nl, npr = cls.shape[0], pred.shape[0]  # number of labels, predictions
+             shape = batch["ori_shape"][si]
+             correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
+             self.seen += 1
+
+             if npr == 0:
+                 if nl:
+                     self.stats.append((correct_bboxes, *torch.zeros((2, 0), device=self.device), cls.squeeze(-1)))
+                     if self.args.plots:
+                         self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1))
+                 continue
+
+             # Predictions
+             if self.args.single_cls:
+                 pred[:, 5] = 0
+             predn = pred.clone()
+             ops.scale_boxes(batch["img"][si].shape[1:], predn[:, :4], shape,
+                             ratio_pad=batch["ratio_pad"][si])  # native-space pred
+
+             # Evaluate
+             if nl:
+                 tbox = ops.xywh2xyxy(bbox)  # target boxes
+                 ops.scale_boxes(batch["img"][si].shape[1:], tbox, shape,
+                                 ratio_pad=batch["ratio_pad"][si])  # native-space labels
+                 labelsn = torch.cat((cls, tbox), 1)  # native-space labels
+                 correct_bboxes = self._process_batch(predn, labelsn)
+                 # TODO: maybe remove these `self.` arguments as they already are member variable
+                 if self.args.plots:
+                     self.confusion_matrix.process_batch(predn, labelsn)
+             self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1)))  # (conf, pcls, tcls)
+
+             # Save
+             if self.args.save_json:
+                 self.pred_to_json(predn, batch["im_file"][si])
+             # if self.args.save_txt:
+             #     save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
+
+     def get_stats(self):
+         stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)]  # to numpy
+         if len(stats) and stats[0].any():
+             self.metrics.process(*stats)
+         self.nt_per_class = np.bincount(stats[-1].astype(int), minlength=self.nc)  # number of targets per class
+         return self.metrics.results_dict
+
+     def print_results(self):
+         pf = '%22s' + '%11i' * 2 + '%11.3g' * len(self.metrics.keys)  # print format
+         self.logger.info(pf % ("all", self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
+         if self.nt_per_class.sum() == 0:
+             self.logger.warning(
+                 f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels')
+
+         # Print results per class
+         if (self.args.verbose or not self.training) and self.nc > 1 and len(self.stats):
+             for i, c in enumerate(self.metrics.ap_class_index):
+                 self.logger.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i)))
+
+         if self.args.plots:
+             self.confusion_matrix.plot(save_dir=self.save_dir, names=list(self.names.values()))
+
+     def _process_batch(self, detections, labels):
+         """
+         Return correct prediction matrix
+         Arguments:
+             detections (array[N, 6]), x1, y1, x2, y2, conf, class
+             labels (array[M, 5]), class, x1, y1, x2, y2
+         Returns:
+             correct (array[N, 10]), for 10 IoU levels
+         """
+         iou = box_iou(labels[:, 1:], detections[:, :4])
+         correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool)
+         correct_class = labels[:, 0:1] == detections[:, 5]
+         for i in range(len(self.iouv)):
+             x = torch.where((iou >= self.iouv[i]) & correct_class)  # IoU > threshold and classes match
+             if x[0].shape[0]:
+                 matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]),
+                                     1).cpu().numpy()  # [label, detect, iou]
+                 if x[0].shape[0] > 1:
+                     matches = matches[matches[:, 2].argsort()[::-1]]
+                     matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                     # matches = matches[matches[:, 2].argsort()[::-1]]
+                     matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+                 correct[matches[:, 1].astype(int), i] = True
+         return torch.tensor(correct, dtype=torch.bool, device=detections.device)
+
+     def get_dataloader(self, dataset_path, batch_size):
+         # TODO: manage splits differently
+         # calculate stride - check if model is initialized
+         gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)  # stride is a per-layer tensor, as in train.py
+         return create_dataloader(path=dataset_path,
+                                  imgsz=self.args.imgsz,
+                                  batch_size=batch_size,
+                                  stride=gs,
+                                  hyp=dict(self.args),
+                                  cache=False,
+                                  pad=0.5,
+                                  rect=True,
+                                  workers=self.args.workers,
+                                  prefix=colorstr(f'{self.args.mode}: '),
+                                  shuffle=False,
+                                  seed=self.args.seed)[0] if self.args.v5loader else \
+             build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, mode="val")[0]
+
+     def plot_val_samples(self, batch, ni):
+         plot_images(batch["img"],
+                     batch["batch_idx"],
+                     batch["cls"].squeeze(-1),
+                     batch["bboxes"],
+                     paths=batch["im_file"],
+                     fname=self.save_dir / f"val_batch{ni}_labels.jpg",
+                     names=self.names)
+
+     def plot_predictions(self, batch, preds, ni):
+         plot_images(batch["img"],
+                     *output_to_target(preds, max_det=15),
+                     paths=batch["im_file"],
+                     fname=self.save_dir / f'val_batch{ni}_pred.jpg',
+                     names=self.names)  # pred
+
+     def pred_to_json(self, predn, filename):
+         stem = Path(filename).stem
+         image_id = int(stem) if stem.isnumeric() else stem
+         box = ops.xyxy2xywh(predn[:, :4])  # xywh
+         box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+         for p, b in zip(predn.tolist(), box.tolist()):
+             self.jdict.append({
+                 'image_id': image_id,
+                 'category_id': self.class_map[int(p[5])],
+                 'bbox': [round(x, 3) for x in b],
+                 'score': round(p[4], 5)})
+
+     def eval_json(self, stats):
+         if self.args.save_json and self.is_coco and len(self.jdict):
+             anno_json = self.data['path'] / "annotations/instances_val2017.json"  # annotations
+             pred_json = self.save_dir / "predictions.json"  # predictions
+             self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+                 check_requirements('pycocotools>=2.0.6')
+                 from pycocotools.coco import COCO  # noqa
+                 from pycocotools.cocoeval import COCOeval  # noqa
+
+                 for x in anno_json, pred_json:
+                     assert x.is_file(), f"{x} file not found"
+                 anno = COCO(str(anno_json))  # init annotations api
+                 pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
+                 eval = COCOeval(anno, pred, 'bbox')
+                 if self.is_coco:
+                     eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
+                 eval.evaluate()
+                 eval.accumulate()
+                 eval.summarize()
+                 stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = eval.stats[:2]  # update mAP50-95 and mAP50
+             except Exception as e:
+                 self.logger.warning(f'pycocotools unable to run: {e}')
+         return stats
+
+
+ @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
+ def val(cfg):
+     cfg.data = cfg.data or "coco128.yaml"
+     validator = DetectionValidator(args=cfg)
+     validator(model=cfg.model)
+
+
+ if __name__ == "__main__":
+     val()
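
Note: _process_batch above is the standard true-positive matrix computation for mAP: a prediction counts as correct at a given IoU threshold when it overlaps a same-class label at or above that threshold, with duplicate matches pruned greedily by descending IoU. A self-contained toy illustration of the thresholding part (it omits the argsort/unique duplicate removal the method also performs; box_iou here is a stand-in for ultralytics.yolo.utils.metrics.box_iou):

  import torch

  def box_iou(a, b):
      # Pairwise IoU of xyxy boxes: a is (M, 4), b is (N, 4) -> (M, N)
      area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
      area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
      lt = torch.max(a[:, None, :2], b[None, :, :2])  # intersection top-left
      rb = torch.min(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right
      inter = (rb - lt).clamp(min=0).prod(2)
      return inter / (area_a[:, None] + area_b[None, :] - inter)

  labels = torch.tensor([[0., 0., 0., 10., 10.]])         # class, x1, y1, x2, y2
  detections = torch.tensor([[1., 1., 9., 9., 0.9, 0.]])  # x1, y1, x2, y2, conf, class
  iou = box_iou(labels[:, 1:], detections[:, :4])         # here ~0.64
  iouv = torch.linspace(0.5, 0.95, 10)                    # same thresholds as self.iouv
  correct_class = labels[:, 0:1] == detections[:, 5]      # class agreement mask
  correct = (iou >= iouv[:, None, None]) & correct_class  # (10, M, N)
  print(correct.any(1).T)  # per-detection hit/miss at each IoU threshold: True at 0.5-0.6, False above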
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
+ size 6534387
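
Note: yolov8n.pt is committed as a Git LFS pointer; the three lines above are the pointer text, not the ~6.5 MB weight blob itself. Once the blob is actually fetched (e.g. with git lfs pull), a minimal loading sketch using the same YOLO wrapper train.py imports:

  from ultralytics import YOLO

  model = YOLO("yolov8n.pt")  # load the pretrained nano detection weights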