Commit 4b50a03
Parent(s): 263dd84
Upload val.py

val.py ADDED  @@ -0,0 +1,398 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset

Usage:
    $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640

Usage - formats:
    $ python path/to/val.py --weights yolov5s.pt                 # PyTorch
                                      yolov5s.torchscript        # TorchScript
                                      yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s.xml                # OpenVINO
                                      yolov5s.engine             # TensorRT
                                      yolov5s.mlmodel            # CoreML (macOS-only)
                                      yolov5s_saved_model        # TensorFlow SavedModel
                                      yolov5s.pb                 # TensorFlow GraphDef
                                      yolov5s.tflite             # TensorFlow Lite
                                      yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
"""

import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread

import numpy as np
import torch
from tqdm.auto import tqdm

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml,
                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                           scale_coords, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')


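# A note on the JSON format produced by save_one_json() below: COCO stores boxes as
# [x_min, y_min, width, height] in pixels, so predictions are converted from xyxy
# to xywh and shifted from box center to top-left corner before serialization.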
def save_one_json(predn, jdict, path, class_map):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    for p, b in zip(predn.tolist(), box.tolist()):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5)})


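# process_batch() below implements a greedy one-to-one matching: candidate
# (label, detection) pairs must share a class and exceed the lowest IoU threshold;
# sorting matches by IoU in descending order and keeping the first occurrence per
# detection and per label (np.unique with return_index=True) ensures each box is
# matched at most once.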
def process_batch(detections, labels, iouv):
    """
    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
    Arguments:
        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
        labels (Array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (Array[N, 10]), for 10 IoU levels
    """
    correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
    iou = box_iou(labels[:, 1:], detections[:, :4])
    x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5]))  # IoU above threshold and classes match
    if x[0].shape[0]:
        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detection, iou]
        if x[0].shape[0] > 1:
            matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
            # matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        matches = torch.from_numpy(matches).to(iouv.device)
        correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
    return correct


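# run() serves two callers: train.py passes an in-memory model (detected via
# model is not None) for per-epoch validation, while standalone CLI use loads
# weights through DetectMultiBackend.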
@torch.no_grad()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()
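    # Note: iouv spans the 10 IoU thresholds 0.5, 0.55, ..., 0.95; mAP@0.5:0.95 is
    # the mean AP over this vector, while mAP@0.5 uses only its first element.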

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights[0]} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad = 0.0 if task in ('speed', 'benchmark') else 0.5
        rect = False if task == 'benchmark' else pt  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        t1 = time_sync()
        if cuda:
            im = im.to(device, non_blocking=True)
            targets = targets.to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        nb, _, height, width = im.shape  # batch size, channels, height, width
        t2 = time_sync()
        dt[0] += t2 - t1

        # Inference
        out, train_out = model(im) if training else model(im, augment=augment, val=True)  # inference, loss outputs
        dt[1] += time_sync() - t2

        # Loss
        if compute_loss:
            loss += compute_loss([x.float() for x in train_out], targets)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
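        # With --save-hybrid, per-image ground-truth labels are passed into NMS so the
        # saved *.txt files mix labels with predictions (useful for auto-labelling);
        # this can inflate mAP, since the labels seed the NMS input as detections.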
        t3 = time_sync()
        out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
        dt[2] += time_sync() - t3

        # Metrics
        for si, pred in enumerate(out):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path, shape = Path(paths[si]), shapes[si][0]
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            else:
                correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'val_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'val_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()

        callbacks.run('on_val_batch_end')

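    # Each stats entry holds (correct, confidence, predicted class, target class) for
    # one image; concatenated to numpy arrays below, they feed ap_per_class(), which
    # builds the PR curves and per-class AP at each IoU threshold.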
    # Compute metrics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end')

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements(['pycocotools'])
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t


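# Example (illustrative sketch): run() can also be invoked from Python, assuming
# yolov5s.pt and data/coco128.yaml are available locally:
#   from val import run
#   results, maps, times = run(data='data/coco128.yaml', weights='yolov5s.pt', imgsz=640)
# where results is (mp, mr, map50, map, box_loss, obj_loss, cls_loss).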
def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.save_txt |= opt.save_hybrid
    print_args(vars(opt))
    return opt


def main(opt):
    check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = True  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            os.system('zip -r study.zip study_*.txt')
            plot_val_study(x=x)  # plot

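# Example CLI invocations (from the usage docstring and task comments above):
#   python val.py --weights yolov5s.pt --data coco128.yaml --img 640
#   python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt
#   python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt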
if __name__ == "__main__":
    opt = parse_opt()
    main(opt)