SakshiRathi77 commited on
Commit
4dc211a
1 Parent(s): 7305008

Upload 7 files

Browse files
Files changed (7) hide show
  1. YOLOv9_Object_Detection_Tracking_and_Counting.py +167 -0
  2. train_dual.py +644 -0
  3. train_triple.py +636 -0
  4. val.py +389 -0
  5. val_dual.py +393 -0
  6. val_triple.py +391 -0
  7. view.py +1 -0
YOLOv9_Object_Detection_Tracking_and_Counting.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from base64 import b64encode
3
+ import cv2
4
+ import torch
5
+ import supervision as sv
6
+ from models.common import DetectMultiBackend, AutoShape
7
+ from utils.torch_utils import select_device
8
+ from utils.general import set_logging
9
+ from supervision import Detections as BaseDetections
10
+ from supervision.config import CLASS_NAME_DATA_FIELD
11
+ from IPython.display import HTML
12
+
13
+ # Extending Supervision's `Detections` to Handle YOLOv9 Results
14
class ExtendedDetections(BaseDetections):
    """Supervision `Detections` subclass that can be built from raw YOLOv9 output."""

    @classmethod
    def from_yolov9(cls, yolov9_results) -> 'ExtendedDetections':
        """Convert an AutoShape YOLOv9 results object into an ExtendedDetections.

        Iterates every prediction tensor, collecting boxes, scores and class ids,
        and attaches the human-readable class names as extra data.
        Returns an empty Detections when nothing was detected.
        """
        boxes, scores, labels = [], [], []

        for det in yolov9_results.pred:
            # Rows are (x1, y1, x2, y2, conf, cls); iterate in reverse order
            # to match the original conversion.
            for *box, score, label in reversed(det):
                boxes.append(torch.stack(box).cpu().numpy())
                scores.append(float(score))
                labels.append(int(label))

        names = np.array([yolov9_results.names[idx] for idx in labels])

        if not boxes:
            return cls.empty()

        return cls(
            xyxy=np.vstack(boxes),
            confidence=np.array(scores),
            class_id=np.array(labels),
            data={CLASS_NAME_DATA_FIELD: names},
        )
36
+
37
# Loading the Model
set_logging(verbose=False)  # silence YOLO's internal logging output
device = select_device('cpu')  # force CPU inference
# DetectMultiBackend loads the 'best.pt' weights (paths are relative to the
# working directory); AutoShape wraps the model with pre/post-processing so it
# can be called directly on numpy images.
model = DetectMultiBackend(weights='best.pt', device=device, data='data/coco.yaml', fuse=True)
model = AutoShape(model)
42
+
43
+ # Function to Set YOLOv9 Post-processing Parameters
44
def prepare_yolov9(model, conf=0.2, iou=0.7, classes=None, agnostic_nms=False, max_det=1000):
    """Configure post-processing parameters on an AutoShape-wrapped YOLOv9 model.

    Args:
        model: AutoShape model whose NMS attributes are set in place.
        conf: Confidence threshold.
        iou: IoU threshold for NMS.
        classes: Optional list of class ids to keep (None keeps all).
        agnostic_nms: Whether NMS ignores class labels.
        max_det: Maximum detections per image.

    Returns:
        The same model instance, for call chaining.
    """
    settings = {
        'conf': conf,
        'iou': iou,
        'classes': classes,
        'agnostic': agnostic_nms,
        'max_det': max_det,
    }
    for attr, value in settings.items():
        setattr(model, attr, value)
    return model
51
+
52
+ # Function to Play Videos
53
def play(filename, width=500):
    """Return an inline HTML5 video player for a local MP4 file.

    Args:
        filename: Path to the video file to embed.
        width: Display width of the player in pixels (default 500).

    Returns:
        IPython.display.HTML widget embedding the video as a base64 data URI.
    """
    # Use a context manager so the file handle is always closed
    # (the original left it open).
    with open(filename, 'rb') as f:
        video = f.read()
    src = 'data:video/mp4;base64,' + b64encode(video).decode()
    # BUG FIX: the original hard-coded width=500 inside the tag, silently
    # ignoring the `width` parameter; interpolate the parameter instead.
    html = f'<video width={width} controls autoplay loop><source src="{src}" type="video/mp4"></video>'
    return HTML(html)
59
+
60
# Constants
SOURCE_VIDEO_PATH = "test.mp4"    # default input video (relative to CWD)
TARGET_VIDEO_PATH = "output.mp4"  # default annotated output video
63
+
64
+ # Simple Object Detection with YOLOv9 and Supervision
65
def prepare_model_and_video_info(model, config, source_path):
    """Apply detection settings to the model and read the source video's metadata.

    Args:
        model: AutoShape YOLOv9 model.
        config: Keyword arguments forwarded to `prepare_yolov9`.
        source_path: Path of the video to inspect.

    Returns:
        Tuple of (configured model, sv.VideoInfo for the source video).
    """
    configured = prepare_yolov9(model, **config)
    info = sv.VideoInfo.from_video_path(source_path)
    return configured, info
69
+
70
def setup_annotator():
    """Create the bounding-box annotator used by the simple detection pipeline."""
    annotator = sv.BoundingBoxAnnotator(thickness=2)
    return annotator
72
+
73
def simple_annotate_frame(frame, model, annotator):
    """Run YOLOv9 on one BGR frame, draw the boxes, and preview via cv2.imshow.

    Returns the annotated copy of the frame.
    """
    # OpenCV delivers BGR frames; the model expects RGB, so flip channels.
    rgb = frame[..., ::-1]
    results = model(rgb, size=640, augment=False)
    detections = ExtendedDetections.from_yolov9(results)

    # Annotate a copy so the caller's frame is left untouched, then show a
    # live preview window.
    annotated = annotator.annotate(scene=frame.copy(), detections=detections)
    cv2.imshow("Detections", annotated)
    cv2.waitKey(1)  # small delay so the preview window refreshes

    return annotated
84
+
85
def simple_process_video(model, config=None, source_path=SOURCE_VIDEO_PATH, target_path=TARGET_VIDEO_PATH):
    """Annotate every frame of a video with YOLOv9 detections and save the result.

    Args:
        model: AutoShape-wrapped YOLOv9 model.
        config: Post-processing settings forwarded to `prepare_yolov9`;
            defaults to dict(conf=0.1, iou=0.45, classes=None).
        source_path: Input video path.
        target_path: Output (annotated) video path.
    """
    # BUG FIX: the original used a mutable dict as the default argument, which
    # is shared across calls; build a fresh dict per call instead.
    if config is None:
        config = dict(conf=0.1, iou=0.45, classes=None)
    model, _ = prepare_model_and_video_info(model, config, source_path)
    annotator = setup_annotator()

    def callback(frame: np.ndarray, index: int) -> np.ndarray:
        # Per-frame hook for sv.process_video; index is unused here.
        return simple_annotate_frame(frame, model, annotator)

    sv.process_video(source_path=source_path, target_path=target_path, callback=callback)
93
+
94
+ # Advanced Detection, Tracking, and Counting with YOLOv9 and Supervision
95
def setup_model_and_video_info(model, config, source_path):
    """Configure the model for the tracking pipeline and fetch video metadata.

    Returns a tuple of (configured model, sv.VideoInfo for source_path).
    """
    return prepare_yolov9(model, **config), sv.VideoInfo.from_video_path(source_path)
99
+
100
def create_byte_tracker(video_info):
    """Build a ByteTrack tracker synced to the video's frame rate.

    The long track_buffer (250 frames) keeps ids alive through occlusions.
    """
    tracker = sv.ByteTrack(
        track_thresh=0.25,
        track_buffer=250,
        match_thresh=0.95,
        frame_rate=video_info.fps,
    )
    return tracker
102
+
103
def setup_annotators():
    """Create the annotator set used by the advanced tracking pipeline.

    Returns:
        Tuple of (list of box-style annotators, trace annotator, label
        annotator). The box annotators are cycled across sections of the
        video by `annotate_frame`.
    """
    by_track = sv.ColorLookup.TRACK  # color everything by tracker id
    box_styles = [
        sv.BoundingBoxAnnotator(thickness=2, color_lookup=by_track),
        sv.RoundBoxAnnotator(thickness=2, color_lookup=by_track),
        sv.BoxCornerAnnotator(thickness=2, color_lookup=by_track),
    ]
    trace_annotator = sv.TraceAnnotator(thickness=2, trace_length=50, color_lookup=by_track)
    label_annotator = sv.LabelAnnotator(text_scale=0.5, color_lookup=by_track)
    return box_styles, trace_annotator, label_annotator
110
+
111
def setup_counting_zone(counting_zone, video_info):
    """Build the polygon zone (and its annotator) used for object counting.

    Args:
        counting_zone: 'whole_frame' to count over the entire frame, or a
            polygon given as a sequence of [x, y] points.
        video_info: sv.VideoInfo describing the source video.

    Returns:
        Tuple of (sv.PolygonZone, sv.PolygonZoneAnnotator).
    """
    if counting_zone == 'whole_frame':
        # Rectangle covering the full frame (inclusive pixel coordinates).
        w, h = video_info.width, video_info.height
        polygon = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])
    else:
        polygon = np.array(counting_zone)
    polygon_zone = sv.PolygonZone(
        polygon=polygon,
        frame_resolution_wh=(video_info.width, video_info.height),
        triggering_position=sv.Position.CENTER,
    )
    # Whole-frame zones get a doubled outline thickness so they stay visible.
    polygon_zone_annotator = sv.PolygonZoneAnnotator(
        polygon_zone,
        sv.Color.ROBOFLOW,
        thickness=2 * (2 if counting_zone == 'whole_frame' else 1),
        text_thickness=1,
        text_scale=0.5,
    )
    return polygon_zone, polygon_zone_annotator
119
+
120
def annotate_frame(frame, index, video_info, detections, byte_tracker, counting_zone, polygon_zone, polygon_zone_annotator, trace_annotator, annotators_list, label_annotator, show_labels, model):
    """Track, zone-filter, and draw all overlays for a single video frame.

    Returns an annotated copy of `frame`; the input frame is not modified.
    """
    # Associate the raw detections with persistent tracker ids.
    detections = byte_tracker.update_with_detections(detections)
    annotated_frame = frame.copy()

    # Keep only detections inside the counting zone and draw the zone overlay
    # (which includes the in-zone count).
    if counting_zone is not None:
        is_inside_polygon = polygon_zone.trigger(detections)
        detections = detections[is_inside_polygon]
        annotated_frame = polygon_zone_annotator.annotate(annotated_frame)

    # Draw motion trails for each track.
    annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)

    # Switch box style per section of the video: the video is split into
    # len(annotators_list) equal parts and each part uses a different style.
    section_index = int(index / (video_info.total_frames / len(annotators_list)))
    annotated_frame = annotators_list[section_index].annotate(scene=annotated_frame, detections=detections)

    if show_labels:
        annotated_frame = add_labels_to_frame(label_annotator, annotated_frame, detections, model)

    return annotated_frame
138
+
139
def add_labels_to_frame(annotator, frame, detections, model):
    """Attach '#tracker_id class_name confidence' text labels to each detection."""
    names = model.model.names
    labels = [
        f"#{tid} {names[cid]} {conf:0.2f}"
        for conf, cid, tid in zip(detections.confidence, detections.class_id, detections.tracker_id)
    ]
    return annotator.annotate(scene=frame, detections=detections, labels=labels)
142
+
143
def process_video(model, config=None, counting_zone=None, show_labels=True, source_path=SOURCE_VIDEO_PATH, target_path=TARGET_VIDEO_PATH):
    """Run detection, ByteTrack tracking, and optional zone counting on a video.

    Args:
        model: AutoShape-wrapped YOLOv9 model.
        config: Post-processing settings for `prepare_yolov9`; defaults to
            dict(conf=0.1, iou=0.45, classes=None).
        counting_zone: None to disable counting, 'whole_frame' to count over
            the full frame, or a polygon as a sequence of [x, y] points.
        show_labels: Whether to draw '#id class conf' labels on detections.
        source_path: Input video path.
        target_path: Output (annotated) video path.
    """
    # BUG FIX: the original defaults were a shared mutable dict plus the
    # invalid values classes=True and counting_zone=True — a boolean is
    # neither a class-id list nor a polygon, and counting_zone=True would
    # crash inside setup_counting_zone (np.array(True) is not a polygon).
    if config is None:
        config = dict(conf=0.1, iou=0.45, classes=None)
    model, video_info = setup_model_and_video_info(model, config, source_path)
    byte_tracker = create_byte_tracker(video_info)
    annotators_list, trace_annotator, label_annotator = setup_annotators()
    polygon_zone, polygon_zone_annotator = setup_counting_zone(counting_zone, video_info) if counting_zone else (None, None)

    def callback(frame: np.ndarray, index: int) -> np.ndarray:
        # BGR -> RGB for the model.
        frame_rgb = frame[..., ::-1]
        results = model(frame_rgb, size=608, augment=False)
        detections = ExtendedDetections.from_yolov9(results)

        annotated_frame = annotate_frame(frame, index, video_info, detections, byte_tracker, counting_zone, polygon_zone, polygon_zone_annotator, trace_annotator, annotators_list, label_annotator, show_labels, model)
        # Live preview of the annotated frame while the output is written.
        cv2.imshow("Detections", annotated_frame)
        cv2.waitKey(1)

        return annotated_frame

    sv.process_video(source_path=source_path, target_path=target_path, callback=callback)
162
+
163
# Detection, Tracking, and Counting in Full Frame
# Restrict detection to class ids 0, 2, 3 — presumably COCO person/car/
# motorcycle given data/coco.yaml above; confirm against the dataset yaml.
yolov9_config=dict(conf=0.3, iou=0.45, classes=[0, 2, 3])
process_video(model, config=yolov9_config, counting_zone='whole_frame', show_labels=True, target_path='demo_file.mp4')

cv2.destroyAllWindows()  # close the cv2.imshow preview window
train_dual.py ADDED
@@ -0,0 +1,644 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import math
3
+ import os
4
+ import random
5
+ import sys
6
+ import time
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+ import torch
13
+ import torch.distributed as dist
14
+ import torch.nn as nn
15
+ import yaml
16
+ from torch.optim import lr_scheduler
17
+ from tqdm import tqdm
18
+
19
+ FILE = Path(__file__).resolve()
20
+ ROOT = FILE.parents[0] # YOLO root directory
21
+ if str(ROOT) not in sys.path:
22
+ sys.path.append(str(ROOT)) # add ROOT to PATH
23
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
24
+
25
+ import val_dual as validate # for end-of-epoch mAP
26
+ from models.experimental import attempt_load
27
+ from models.yolo import Model
28
+ from utils.autoanchor import check_anchors
29
+ from utils.autobatch import check_train_batch_size
30
+ from utils.callbacks import Callbacks
31
+ from utils.dataloaders import create_dataloader
32
+ from utils.downloads import attempt_download, is_url
33
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
34
+ check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
35
+ get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
36
+ labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer,
37
+ yaml_save, one_flat_cycle)
38
+ from utils.loggers import Loggers
39
+ from utils.loggers.comet.comet_utils import check_comet_resume
40
+ from utils.loss_tal_dual import ComputeLoss
41
+ #from utils.loss_tal_dual import ComputeLossLH as ComputeLoss
42
+ #from utils.loss_tal_dual import ComputeLossLHCF as ComputeLoss
43
+ from utils.metrics import fitness
44
+ from utils.plots import plot_evolve
45
+ from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
46
+ smart_resume, torch_distributed_zero_first)
47
+
48
+ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
49
+ RANK = int(os.getenv('RANK', -1))
50
+ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
51
+ GIT_INFO = None#check_git_info()
52
+
53
+
54
+ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary
55
+ save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
56
+ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
57
+ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
58
+ callbacks.run('on_pretrain_routine_start')
59
+
60
+ # Directories
61
+ w = save_dir / 'weights' # weights dir
62
+ (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
63
+ last, best = w / 'last.pt', w / 'best.pt'
64
+
65
+ # Hyperparameters
66
+ if isinstance(hyp, str):
67
+ with open(hyp, errors='ignore') as f:
68
+ hyp = yaml.safe_load(f) # load hyps dict
69
+ LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
70
+ hyp['anchor_t'] = 5.0
71
+ opt.hyp = hyp.copy() # for saving hyps to checkpoints
72
+
73
+ # Save run settings
74
+ if not evolve:
75
+ yaml_save(save_dir / 'hyp.yaml', hyp)
76
+ yaml_save(save_dir / 'opt.yaml', vars(opt))
77
+
78
+ # Loggers
79
+ data_dict = None
80
+ if RANK in {-1, 0}:
81
+ loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance
82
+
83
+ # Register actions
84
+ for k in methods(loggers):
85
+ callbacks.register_action(k, callback=getattr(loggers, k))
86
+
87
+ # Process custom dataset artifact link
88
+ data_dict = loggers.remote_dataset
89
+ if resume: # If resuming runs from remote artifact
90
+ weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size
91
+
92
+ # Config
93
+ plots = not evolve and not opt.noplots # create plots
94
+ cuda = device.type != 'cpu'
95
+ init_seeds(opt.seed + 1 + RANK, deterministic=True)
96
+ with torch_distributed_zero_first(LOCAL_RANK):
97
+ data_dict = data_dict or check_dataset(data) # check if None
98
+ train_path, val_path = data_dict['train'], data_dict['val']
99
+ nc = 1 if single_cls else int(data_dict['nc']) # number of classes
100
+ names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
101
+ #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
102
+ is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt') # COCO dataset
103
+
104
+ # Model
105
+ check_suffix(weights, '.pt') # check weights
106
+ pretrained = weights.endswith('.pt')
107
+ if pretrained:
108
+ with torch_distributed_zero_first(LOCAL_RANK):
109
+ weights = attempt_download(weights) # download if not found locally
110
+ ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
111
+ model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
112
+ exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
113
+ csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
114
+ csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
115
+ model.load_state_dict(csd, strict=False) # load
116
+ LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
117
+ else:
118
+ model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
119
+ amp = check_amp(model) # check AMP
120
+
121
+ # Freeze
122
+ freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
123
+ for k, v in model.named_parameters():
124
+ # v.requires_grad = True # train all layers TODO: uncomment this line as in master
125
+ # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
126
+ if any(x in k for x in freeze):
127
+ LOGGER.info(f'freezing {k}')
128
+ v.requires_grad = False
129
+
130
+ # Image size
131
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
132
+ imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
133
+
134
+ # Batch size
135
+ if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
136
+ batch_size = check_train_batch_size(model, imgsz, amp)
137
+ loggers.on_params_update({"batch_size": batch_size})
138
+
139
+ # Optimizer
140
+ nbs = 64 # nominal batch size
141
+ accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
142
+ hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
143
+ optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
144
+
145
+ # Scheduler
146
+ if opt.cos_lr:
147
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
148
+ elif opt.flat_cos_lr:
149
+ lf = one_flat_cycle(1, hyp['lrf'], epochs) # flat cosine 1->hyp['lrf']
150
+ elif opt.fixed_lr:
151
+ lf = lambda x: 1.0
152
+ else:
153
+ lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
154
+
155
+ # def lf(x): # saw
156
+ # return (1 - (x % 30) / 30) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
157
+ #
158
+ # def lf(x): # triangle start at min
159
+ # return 2 * abs(x / 30 - math.floor(x / 30 + 1 / 2)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
160
+ #
161
+ # def lf(x): # triangle start at max
162
+ # return 2 * abs(x / 32 + .5 - math.floor(x / 32 + 1)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
163
+
164
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
165
+ # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs)
166
+
167
+ # EMA
168
+ ema = ModelEMA(model) if RANK in {-1, 0} else None
169
+
170
+ # Resume
171
+ best_fitness, start_epoch = 0.0, 0
172
+ if pretrained:
173
+ if resume:
174
+ best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
175
+ del ckpt, csd
176
+
177
+ # DP mode
178
+ if cuda and RANK == -1 and torch.cuda.device_count() > 1:
179
+ LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.')
180
+ model = torch.nn.DataParallel(model)
181
+
182
+ # SyncBatchNorm
183
+ if opt.sync_bn and cuda and RANK != -1:
184
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
185
+ LOGGER.info('Using SyncBatchNorm()')
186
+
187
+ # Trainloader
188
+ train_loader, dataset = create_dataloader(train_path,
189
+ imgsz,
190
+ batch_size // WORLD_SIZE,
191
+ gs,
192
+ single_cls,
193
+ hyp=hyp,
194
+ augment=True,
195
+ cache=None if opt.cache == 'val' else opt.cache,
196
+ rect=opt.rect,
197
+ rank=LOCAL_RANK,
198
+ workers=workers,
199
+ image_weights=opt.image_weights,
200
+ close_mosaic=opt.close_mosaic != 0,
201
+ quad=opt.quad,
202
+ prefix=colorstr('train: '),
203
+ shuffle=True,
204
+ min_items=opt.min_items)
205
+ labels = np.concatenate(dataset.labels, 0)
206
+ mlc = int(labels[:, 0].max()) # max label class
207
+ assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
208
+
209
+ # Process 0
210
+ if RANK in {-1, 0}:
211
+ val_loader = create_dataloader(val_path,
212
+ imgsz,
213
+ batch_size // WORLD_SIZE * 2,
214
+ gs,
215
+ single_cls,
216
+ hyp=hyp,
217
+ cache=None if noval else opt.cache,
218
+ rect=True,
219
+ rank=-1,
220
+ workers=workers * 2,
221
+ pad=0.5,
222
+ prefix=colorstr('val: '))[0]
223
+
224
+ if not resume:
225
+ # if not opt.noautoanchor:
226
+ # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
227
+ model.half().float() # pre-reduce anchor precision
228
+
229
+ callbacks.run('on_pretrain_routine_end', labels, names)
230
+
231
+ # DDP mode
232
+ if cuda and RANK != -1:
233
+ model = smart_DDP(model)
234
+
235
+ # Model attributes
236
+ nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
237
+ #hyp['box'] *= 3 / nl # scale to layers
238
+ #hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
239
+ #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
240
+ hyp['label_smoothing'] = opt.label_smoothing
241
+ model.nc = nc # attach number of classes to model
242
+ model.hyp = hyp # attach hyperparameters to model
243
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
244
+ model.names = names
245
+
246
+ # Start training
247
+ t0 = time.time()
248
+ nb = len(train_loader) # number of batches
249
+ nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
250
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
251
+ last_opt_step = -1
252
+ maps = np.zeros(nc) # mAP per class
253
+ results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
254
+ scheduler.last_epoch = start_epoch - 1 # do not move
255
+ scaler = torch.cuda.amp.GradScaler(enabled=amp)
256
+ stopper, stop = EarlyStopping(patience=opt.patience), False
257
+ compute_loss = ComputeLoss(model) # init loss class
258
+ callbacks.run('on_train_start')
259
+ LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
260
+ f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
261
+ f"Logging results to {colorstr('bold', save_dir)}\n"
262
+ f'Starting training for {epochs} epochs...')
263
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
264
+ callbacks.run('on_train_epoch_start')
265
+ model.train()
266
+
267
+ # Update image weights (optional, single-GPU only)
268
+ if opt.image_weights:
269
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
270
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
271
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
272
+ if epoch == (epochs - opt.close_mosaic):
273
+ LOGGER.info("Closing dataloader mosaic")
274
+ dataset.mosaic = False
275
+
276
+ # Update mosaic border (optional)
277
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
278
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
279
+
280
+ mloss = torch.zeros(3, device=device) # mean losses
281
+ if RANK != -1:
282
+ train_loader.sampler.set_epoch(epoch)
283
+ pbar = enumerate(train_loader)
284
+ LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size'))
285
+ if RANK in {-1, 0}:
286
+ pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
287
+ optimizer.zero_grad()
288
+ for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
289
+ callbacks.run('on_train_batch_start')
290
+ ni = i + nb * epoch # number integrated batches (since train start)
291
+ imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
292
+
293
+ # Warmup
294
+ if ni <= nw:
295
+ xi = [0, nw] # x interp
296
+ # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
297
+ accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
298
+ for j, x in enumerate(optimizer.param_groups):
299
+ # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
300
+ x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
301
+ if 'momentum' in x:
302
+ x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
303
+
304
+ # Multi-scale
305
+ if opt.multi_scale:
306
+ sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
307
+ sf = sz / max(imgs.shape[2:]) # scale factor
308
+ if sf != 1:
309
+ ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
310
+ imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
311
+
312
+ # Forward
313
+ with torch.cuda.amp.autocast(amp):
314
+ pred = model(imgs) # forward
315
+ loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
316
+ if RANK != -1:
317
+ loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
318
+ if opt.quad:
319
+ loss *= 4.
320
+
321
+ # Backward
322
+ scaler.scale(loss).backward()
323
+
324
+ # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
325
+ if ni - last_opt_step >= accumulate:
326
+ scaler.unscale_(optimizer) # unscale gradients
327
+ torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
328
+ scaler.step(optimizer) # optimizer.step
329
+ scaler.update()
330
+ optimizer.zero_grad()
331
+ if ema:
332
+ ema.update(model)
333
+ last_opt_step = ni
334
+
335
+ # Log
336
+ if RANK in {-1, 0}:
337
+ mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
338
+ mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
339
+ pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
340
+ (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
341
+ callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
342
+ if callbacks.stop_training:
343
+ return
344
+ # end batch ------------------------------------------------------------------------------------------------
345
+
346
+ # Scheduler
347
+ lr = [x['lr'] for x in optimizer.param_groups] # for loggers
348
+ scheduler.step()
349
+
350
+ if RANK in {-1, 0}:
351
+ # mAP
352
+ callbacks.run('on_train_epoch_end', epoch=epoch)
353
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
354
+ final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
355
+ if not noval or final_epoch: # Calculate mAP
356
+ results, maps, _ = validate.run(data_dict,
357
+ batch_size=batch_size // WORLD_SIZE * 2,
358
+ imgsz=imgsz,
359
+ half=amp,
360
+ model=ema.ema,
361
+ single_cls=single_cls,
362
+ dataloader=val_loader,
363
+ save_dir=save_dir,
364
+ plots=False,
365
+ callbacks=callbacks,
366
+ compute_loss=compute_loss)
367
+
368
+ # Update best mAP
369
+ fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
370
+ stop = stopper(epoch=epoch, fitness=fi) # early stop check
371
+ if fi > best_fitness:
372
+ best_fitness = fi
373
+ log_vals = list(mloss) + list(results) + lr
374
+ callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
375
+
376
+ # Save model
377
+ if (not nosave) or (final_epoch and not evolve): # if save
378
+ ckpt = {
379
+ 'epoch': epoch,
380
+ 'best_fitness': best_fitness,
381
+ 'model': deepcopy(de_parallel(model)).half(),
382
+ 'ema': deepcopy(ema.ema).half(),
383
+ 'updates': ema.updates,
384
+ 'optimizer': optimizer.state_dict(),
385
+ 'opt': vars(opt),
386
+ 'git': GIT_INFO, # {remote, branch, commit} if a git repo
387
+ 'date': datetime.now().isoformat()}
388
+
389
+ # Save last, best and delete
390
+ torch.save(ckpt, last)
391
+ if best_fitness == fi:
392
+ torch.save(ckpt, best)
393
+ if opt.save_period > 0 and epoch % opt.save_period == 0:
394
+ torch.save(ckpt, w / f'epoch{epoch}.pt')
395
+ del ckpt
396
+ callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
397
+
398
+ # EarlyStopping
399
+ if RANK != -1: # if DDP training
400
+ broadcast_list = [stop if RANK == 0 else None]
401
+ dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
402
+ if RANK != 0:
403
+ stop = broadcast_list[0]
404
+ if stop:
405
+ break # must break all DDP ranks
406
+
407
+ # end epoch ----------------------------------------------------------------------------------------------------
408
+ # end training -----------------------------------------------------------------------------------------------------
409
+ if RANK in {-1, 0}:
410
+ LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
411
+ for f in last, best:
412
+ if f.exists():
413
+ strip_optimizer(f) # strip optimizers
414
+ if f is best:
415
+ LOGGER.info(f'\nValidating {f}...')
416
+ results, _, _ = validate.run(
417
+ data_dict,
418
+ batch_size=batch_size // WORLD_SIZE * 2,
419
+ imgsz=imgsz,
420
+ model=attempt_load(f, device).half(),
421
+ single_cls=single_cls,
422
+ dataloader=val_loader,
423
+ save_dir=save_dir,
424
+ save_json=is_coco,
425
+ verbose=True,
426
+ plots=plots,
427
+ callbacks=callbacks,
428
+ compute_loss=compute_loss) # val best model with plots
429
+ if is_coco:
430
+ callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
431
+
432
+ callbacks.run('on_train_end', last, best, epoch, results)
433
+
434
+ torch.cuda.empty_cache()
435
+ return results
436
+
437
+
438
def parse_opt(known=False):
    """Parse command-line options for YOLOv9 dual-branch training.

    Args:
        known: If True, use parse_known_args() and silently ignore
            unrecognized arguments (useful when another tool drives training).

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser()
    # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path')
    # parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    # NOTE(review): defaults differ from upstream — empty weights means
    # training from scratch with the yolo.yaml architecture by default.
    parser.add_argument('--weights', type=str, default='', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml path')
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-high.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
    parser.add_argument('--noplots', action='store_true', help='save no plot files')
    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
    parser.add_argument('--flat-cos-lr', action='store_true', help='flat cosine LR scheduler')
    parser.add_argument('--fixed-lr', action='store_true', help='fixed LR scheduler')
    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
    parser.add_argument('--min-items', type=int, default=0, help='Experimental')
    parser.add_argument('--close-mosaic', type=int, default=0, help='Experimental')

    # Logger arguments
    parser.add_argument('--entity', default=None, help='Entity')
    parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option')
    parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval')
    parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use')

    return parser.parse_known_args()[0] if known else parser.parse_args()
488
+
489
+
490
def main(opt, callbacks=Callbacks()):
    """Entry point: check/normalize options, handle --resume and --evolve, set up DDP, then train.

    opt: argparse.Namespace produced by parse_opt().
    callbacks: Callbacks hook container shared across the run.
    NOTE(review): the default Callbacks() is created once at import time and shared
    between calls — matches upstream YOLO behaviour, presumably intentional.
    """
    # Checks
    if RANK in {-1, 0}:
        print_args(vars(opt))
        #check_git_status()
        #check_requirements()

    # Resume (from specified or most recent last.pt)
    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            # no opt.yaml next to the checkpoint: fall back to the options stored inside it
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace the CLI namespace wholesale with the saved one
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        # Launched via torch.distributed.run: validate options incompatible with DDP
        msg = 'is not compatible with YOLO Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    # offset 7 skips the metric columns (the `keys` tuple below) — hyp values
                    # start at column 7 of evolve.csv; presumably written by print_mutation, verify there
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()  # fresh hooks per generation so logger state does not leak
            # Write mutation results
            keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
                    'val/obj_loss', 'val/cls_loss')
            print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
631
+
632
+
633
def run(**kwargs):
    """Programmatic training entry point.

    Any keyword overrides the matching CLI option, then training starts.
    Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt')
    Returns the final options namespace.
    """
    opt = parse_opt(True)
    for option, value in kwargs.items():
        setattr(opt, option, value)
    main(opt)
    return opt
640
+
641
+
642
if __name__ == "__main__":
    # CLI entry point: parse command-line options and launch training.
    main(parse_opt())
train_triple.py ADDED
@@ -0,0 +1,636 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import math
3
+ import os
4
+ import random
5
+ import sys
6
+ import time
7
+ from copy import deepcopy
8
+ from datetime import datetime
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+ import torch
13
+ import torch.distributed as dist
14
+ import torch.nn as nn
15
+ import yaml
16
+ from torch.optim import lr_scheduler
17
+ from tqdm import tqdm
18
+
19
+ FILE = Path(__file__).resolve()
20
+ ROOT = FILE.parents[0] # YOLO root directory
21
+ if str(ROOT) not in sys.path:
22
+ sys.path.append(str(ROOT)) # add ROOT to PATH
23
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
24
+
25
+ import val_triple as validate # for end-of-epoch mAP
26
+ from models.experimental import attempt_load
27
+ from models.yolo import Model
28
+ from utils.autoanchor import check_anchors
29
+ from utils.autobatch import check_train_batch_size
30
+ from utils.callbacks import Callbacks
31
+ from utils.dataloaders import create_dataloader
32
+ from utils.downloads import attempt_download, is_url
33
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
34
+ check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
35
+ get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
36
+ labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer,
37
+ yaml_save)
38
+ from utils.loggers import Loggers
39
+ from utils.loggers.comet.comet_utils import check_comet_resume
40
+ from utils.loss_tal_triple import ComputeLoss
41
+ from utils.metrics import fitness
42
+ from utils.plots import plot_evolve
43
+ from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
44
+ smart_resume, torch_distributed_zero_first)
45
+
46
+ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
47
+ RANK = int(os.getenv('RANK', -1))
48
+ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
49
+ GIT_INFO = None#check_git_info()
50
+
51
+
52
def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
    """Train a triple-branch YOLOv9 model for opt.epochs epochs.

    hyp: path to a hyperparameter YAML or an already-loaded dict.
    opt: argparse.Namespace of training options (see parse_opt()).
    device: torch.device selected by the caller.
    callbacks: Callbacks hook container fired at each training stage.
    Returns the final `results` tuple (P, R, mAP@.5, mAP@.5:.95, val losses).
    """
    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze
    callbacks.run('on_pretrain_routine_start')

    # Directories
    w = save_dir / 'weights'  # weights dir
    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
    last, best = w / 'last.pt', w / 'best.pt'

    # Hyperparameters
    if isinstance(hyp, str):
        with open(hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    hyp['anchor_t'] = 5.0  # NOTE(review): unconditionally overrides any anchor_t from the YAML
    opt.hyp = hyp.copy()  # for saving hyps to checkpoints

    # Save run settings
    if not evolve:
        yaml_save(save_dir / 'hyp.yaml', hyp)
        yaml_save(save_dir / 'opt.yaml', vars(opt))

    # Loggers
    data_dict = None
    if RANK in {-1, 0}:
        loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance

        # Register actions
        for k in methods(loggers):
            callbacks.register_action(k, callback=getattr(loggers, k))

        # Process custom dataset artifact link
        data_dict = loggers.remote_dataset
        if resume:  # If resuming runs from remote artifact
            weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size

    # Config
    plots = not evolve and not opt.noplots  # create plots
    cuda = device.type != 'cpu'
    init_seeds(opt.seed + 1 + RANK, deterministic=True)
    with torch_distributed_zero_first(LOCAL_RANK):
        data_dict = data_dict or check_dataset(data)  # check if None
    train_path, val_path = data_dict['train'], data_dict['val']
    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    #is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset
    is_coco = isinstance(val_path, str) and val_path.endswith('val2017.txt')  # COCO dataset

    # Model
    check_suffix(weights, '.pt')  # check weights
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(LOCAL_RANK):
            weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
        model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(csd, strict=False)  # load
        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
    else:
        model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    amp = check_amp(model)  # check AMP

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
    for k, v in model.named_parameters():
        # v.requires_grad = True  # train all layers TODO: uncomment this line as in master
        # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
        if any(x in k for x in freeze):
            LOGGER.info(f'freezing {k}')
            v.requires_grad = False

    # Image size
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2)  # verify imgsz is gs-multiple

    # Batch size
    if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
        batch_size = check_train_batch_size(model, imgsz, amp)
        loggers.on_params_update({"batch_size": batch_size})

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])

    # Scheduler
    if opt.cos_lr:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    else:
        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear

    # def lf(x):  # saw
    #     return (1 - (x % 30) / 30) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
    #
    # def lf(x):  # triangle start at min
    #     return 2 * abs(x / 30 - math.floor(x / 30 + 1 / 2)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']
    #
    # def lf(x):  # triangle start at max
    #     return 2 * abs(x / 32 + .5 - math.floor(x / 32 + 1)) * (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']

    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # from utils.plots import plot_lr_scheduler; plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA (exponential moving average of weights, kept on the logging rank only)
    ema = ModelEMA(model) if RANK in {-1, 0} else None

    # Resume
    best_fitness, start_epoch = 0.0, 0
    if pretrained:
        if resume:
            best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
        del ckpt, csd

    # DP mode
    if cuda and RANK == -1 and torch.cuda.device_count() > 1:
        LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.')
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and RANK != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        LOGGER.info('Using SyncBatchNorm()')

    # Trainloader
    train_loader, dataset = create_dataloader(train_path,
                                              imgsz,
                                              batch_size // WORLD_SIZE,
                                              gs,
                                              single_cls,
                                              hyp=hyp,
                                              augment=True,
                                              cache=None if opt.cache == 'val' else opt.cache,
                                              rect=opt.rect,
                                              rank=LOCAL_RANK,
                                              workers=workers,
                                              image_weights=opt.image_weights,
                                              close_mosaic=opt.close_mosaic != 0,
                                              quad=opt.quad,
                                              prefix=colorstr('train: '),
                                              shuffle=True,
                                              min_items=opt.min_items)
    labels = np.concatenate(dataset.labels, 0)
    mlc = int(labels[:, 0].max())  # max label class
    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'

    # Process 0
    if RANK in {-1, 0}:
        val_loader = create_dataloader(val_path,
                                       imgsz,
                                       batch_size // WORLD_SIZE * 2,
                                       gs,
                                       single_cls,
                                       hyp=hyp,
                                       cache=None if noval else opt.cache,
                                       rect=True,
                                       rank=-1,
                                       workers=workers * 2,
                                       pad=0.5,
                                       prefix=colorstr('val: '))[0]

        if not resume:
            # if not opt.noautoanchor:
            #     check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
            model.half().float()  # pre-reduce anchor precision

        callbacks.run('on_pretrain_routine_end', labels, names)

    # DDP mode
    if cuda and RANK != -1:
        model = smart_DDP(model)

    # Model attributes
    nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
    #hyp['box'] *= 3 / nl  # scale to layers
    #hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
    #hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nb = len(train_loader)  # number of batches
    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = torch.cuda.amp.GradScaler(enabled=amp)
    stopper, stop = EarlyStopping(patience=opt.patience), False
    compute_loss = ComputeLoss(model)  # init loss class
    callbacks.run('on_train_start')
    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
                f"Logging results to {colorstr('bold', save_dir)}\n"
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        callbacks.run('on_train_epoch_start')
        model.train()

        # Update image weights (optional, single-GPU only)
        if opt.image_weights:
            cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
            iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
            dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
        if epoch == (epochs - opt.close_mosaic):
            LOGGER.info("Closing dataloader mosaic")
            dataset.mosaic = False

        # Update mosaic border (optional)
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(3, device=device)  # mean losses
        if RANK != -1:
            train_loader.sampler.set_epoch(epoch)
        pbar = enumerate(train_loader)
        LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'cls_loss', 'dfl_loss', 'Instances', 'Size'))
        if RANK in {-1, 0}:
            pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            callbacks.run('on_train_batch_start')
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with torch.cuda.amp.autocast(amp):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if RANK != -1:
                    loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
            if ni - last_opt_step >= accumulate:
                scaler.unscale_(optimizer)  # unscale gradients
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # clip gradients
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)
                last_opt_step = ni

            # Log
            if RANK in {-1, 0}:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                pbar.set_description(('%11s' * 2 + '%11.4g' * 5) %
                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
                callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss))
                if callbacks.stop_training:
                    return
            # end batch ------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
        scheduler.step()

        if RANK in {-1, 0}:
            # mAP
            callbacks.run('on_train_epoch_end', epoch=epoch)
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
            if not noval or final_epoch:  # Calculate mAP
                results, maps, _ = validate.run(data_dict,
                                                batch_size=batch_size // WORLD_SIZE * 2,
                                                imgsz=imgsz,
                                                half=amp,
                                                model=ema.ema,
                                                single_cls=single_cls,
                                                dataloader=val_loader,
                                                save_dir=save_dir,
                                                plots=False,
                                                callbacks=callbacks,
                                                compute_loss=compute_loss)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            stop = stopper(epoch=epoch, fitness=fi)  # early stop check
            if fi > best_fitness:
                best_fitness = fi
            log_vals = list(mloss) + list(results) + lr
            callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)

            # Save model
            if (not nosave) or (final_epoch and not evolve):  # if save
                ckpt = {
                    'epoch': epoch,
                    'best_fitness': best_fitness,
                    'model': deepcopy(de_parallel(model)).half(),
                    'ema': deepcopy(ema.ema).half(),
                    'updates': ema.updates,
                    'optimizer': optimizer.state_dict(),
                    'opt': vars(opt),
                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
                    'date': datetime.now().isoformat()}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if opt.save_period > 0 and epoch % opt.save_period == 0:
                    torch.save(ckpt, w / f'epoch{epoch}.pt')
                del ckpt
                callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

        # EarlyStopping
        if RANK != -1:  # if DDP training
            broadcast_list = [stop if RANK == 0 else None]
            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
            if RANK != 0:
                stop = broadcast_list[0]
        if stop:
            break  # must break all DDP ranks

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training -----------------------------------------------------------------------------------------------------
    if RANK in {-1, 0}:
        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is best:
                    LOGGER.info(f'\nValidating {f}...')
                    results, _, _ = validate.run(
                        data_dict,
                        batch_size=batch_size // WORLD_SIZE * 2,
                        imgsz=imgsz,
                        model=attempt_load(f, device).half(),
                        single_cls=single_cls,
                        dataloader=val_loader,
                        save_dir=save_dir,
                        save_json=is_coco,
                        verbose=True,
                        plots=plots,
                        callbacks=callbacks,
                        compute_loss=compute_loss)  # val best model with plots
                    if is_coco:
                        callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)

        callbacks.run('on_train_end', last, best, epoch, results)

    torch.cuda.empty_cache()
    return results
430
+
431
+
432
def parse_opt(known=False):
    """Build and parse the command-line options for triple-branch training.

    known=True parses only recognized arguments (used by run() for programmatic calls);
    otherwise unknown arguments raise the usual argparse error.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # shorthand; option strings below match the original CLI exactly
    # parser.add_argument('--weights', type=str, default=ROOT / 'yolo.pt', help='initial weights path')
    # parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    add('--weights', type=str, default='', help='initial weights path')
    add('--cfg', type=str, default='yolo.yaml', help='model.yaml path')
    add('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    add('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-high.yaml', help='hyperparameters path')
    add('--epochs', type=int, default=100, help='total training epochs')
    add('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
    add('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
    add('--rect', action='store_true', help='rectangular training')
    add('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    add('--nosave', action='store_true', help='only save final checkpoint')
    add('--noval', action='store_true', help='only validate final epoch')
    add('--noautoanchor', action='store_true', help='disable AutoAnchor')
    add('--noplots', action='store_true', help='save no plot files')
    add('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
    add('--bucket', type=str, default='', help='gsutil bucket')
    add('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
    add('--image-weights', action='store_true', help='use weighted image selection for training')
    add('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    add('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    add('--single-cls', action='store_true', help='train multi-class data as single-class')
    add('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer')
    add('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    add('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    add('--project', default=ROOT / 'runs/train', help='save to project/name')
    add('--name', default='exp', help='save to project/name')
    add('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    add('--quad', action='store_true', help='quad dataloader')
    add('--cos-lr', action='store_true', help='cosine LR scheduler')
    add('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
    add('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
    add('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
    add('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
    add('--seed', type=int, default=0, help='Global training seed')
    add('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
    add('--min-items', type=int, default=0, help='Experimental')
    add('--close-mosaic', type=int, default=0, help='Experimental')

    # Logger arguments
    add('--entity', default=None, help='Entity')
    add('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option')
    add('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval')
    add('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use')

    return parser.parse_known_args()[0] if known else parser.parse_args()
480
+
481
+
482
def main(opt, callbacks=Callbacks()):
    """Entry point after CLI parsing: validate/resolve options, set up (optional) DDP,
    then either train once or run hyperparameter evolution for ``opt.evolve`` generations.

    Args:
        opt: argparse.Namespace produced by parse_opt().
        callbacks: Callbacks hook container passed through to train().
    """
    # Checks — only the rank-0 (or single-GPU) process prints the arguments
    if RANK in {-1, 0}:
        print_args(vars(opt))
        #check_git_status()
        #check_requirements()

    # Resume (from specified or most recent last.pt)
    if opt.resume and not check_comet_resume(opt) and not opt.evolve:
        last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
        opt_data = opt.data  # original dataset
        if opt_yaml.is_file():
            # Prefer the saved opt.yaml next to the run; fall back to the opt dict in the checkpoint
            with open(opt_yaml, errors='ignore') as f:
                d = yaml.safe_load(f)
        else:
            d = torch.load(last, map_location='cpu')['opt']
        opt = argparse.Namespace(**d)  # replace
        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
        if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
    else:
        # Fresh run: resolve/validate all paths up front
        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        if opt.evolve:
            if opt.project == str(ROOT / 'runs/train'):  # if default project name, rename to runs/evolve
                opt.project = str(ROOT / 'runs/evolve')
            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
        if opt.name == 'cfg':
            opt.name = Path(opt.cfg).stem  # use model.yaml as name
        opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

    # DDP mode — LOCAL_RANK != -1 means this process was launched by torchrun
    device = select_device(opt.device, batch_size=opt.batch_size)
    if LOCAL_RANK != -1:
        msg = 'is not compatible with YOLO Multi-GPU DDP training'
        assert not opt.image_weights, f'--image-weights {msg}'
        assert not opt.evolve, f'--evolve {msg}'
        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

    # Train
    if not opt.evolve:
        train(opt.hyp, opt, device, callbacks)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {
            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
            'box': (1, 0.02, 0.2),  # box loss gain
            'cls': (1, 0.2, 4.0),  # cls loss gain
            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
            'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)

        with open(opt.hyp, errors='ignore') as f:
            hyp = yaml.safe_load(f)  # load hyps dict
            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
                hyp['anchors'] = 3
        if opt.noautoanchor:
            del hyp['anchors'], meta['anchors']
        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
        if opt.bucket:
            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists

        for _ in range(opt.evolve):  # generations to evolve
            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate (first 7 CSV columns are result metrics)

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, callbacks)
            callbacks = Callbacks()
            # Write mutation results
            keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss',
                    'val/obj_loss', 'val/cls_loss')
            print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)

        # Plot results
        plot_evolve(evolve_csv)
        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
                    f"Results saved to {colorstr('bold', save_dir)}\n"
                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
623
+
624
+
625
def run(**kwargs):
    """Programmatic training entry point.

    Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolo.pt')

    Parses default options, overrides them with *kwargs*, trains, and
    returns the (mutated) options namespace.
    """
    opt = parse_opt(True)
    for key in kwargs:
        setattr(opt, key, kwargs[key])
    main(opt)
    return opt
632
+
633
+
634
+ if __name__ == "__main__":
635
+ opt = parse_opt()
636
+ main(opt)
val.py ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ import torch
9
+ from tqdm import tqdm
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[0] # YOLO root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
16
+
17
+ from models.common import DetectMultiBackend
18
+ from utils.callbacks import Callbacks
19
+ from utils.dataloaders import create_dataloader
20
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
21
+ check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
22
+ print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
23
+ from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
24
+ from utils.plots import output_to_target, plot_images, plot_val_study
25
+ from utils.torch_utils import select_device, smart_inference_mode
26
+
27
+
28
def save_one_txt(predn, save_conf, shape, file):
    """Append one image's predictions to a YOLO-format label file.

    Args:
        predn: tensor[N, 6] of (x1, y1, x2, y2, conf, cls) in native image space.
        save_conf: if True, append the confidence as a trailing column.
        shape: (height, width) of the original image, used for normalization.
        file: destination *.txt path (opened in append mode).
    """
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    # Open the file once for all rows (the original reopened it per prediction)
    with open(file, 'a') as f:
        for *xyxy, conf, cls in predn.tolist():
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
36
+
37
+
38
def save_one_json(predn, jdict, path, class_map):
    """Append one image's predictions to *jdict* in COCO detection format.

    Each appended entry looks like:
    {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    stem = path.stem
    image_id = int(stem) if stem.isnumeric() else stem
    boxes = xyxy2xywh(predn[:, :4])  # xywh
    boxes[:, :2] -= boxes[:, 2:] / 2  # xy center to top-left corner
    for det, bbox in zip(predn.tolist(), boxes.tolist()):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(det[5])],
            'bbox': [round(coord, 3) for coord in bbox],
            'score': round(det[4], 5)})
49
+
50
+
51
def process_batch(detections, labels, iouv):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])  # MxN pairwise IoU (labels x detections)
    correct_class = labels[:, 0:1] == detections[:, 5]  # MxN class-agreement mask (broadcast)
    for i in range(len(iouv)):  # one pass per IoU threshold
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                # Greedy 1:1 assignment: sort by IoU desc, then keep each detection
                # and each label at most once (np.unique keeps the first occurrence)
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True  # mark matched detections as TP at this level
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
74
+
75
+
76
@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.7,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        min_items=0,  # Experimental
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
):
    """Validate a detection model on a dataset.

    Called either standalone (loads the model itself) or from train.py
    (``model``/``dataloader``/``save_dir`` supplied).

    Returns:
        ((mp, mr, map50, map, *val_losses), maps, t) where ``maps`` is the
        per-class mAP array and ``t`` the (preprocess, inference, NMS)
        speeds in ms per image.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       min_items=min_items,  # BUGFIX: was opt.min_items (NameError when run() is called as a function)
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times: preprocess, inference, NMS
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)

        # Loss
        if compute_loss:
            loss += compute_loss(train_out, targets)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det)

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images (first 3 batches only)
        if plots and batch_i < 3:
            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements('pycocotools')
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
319
+
320
+
321
def parse_opt():
    """Parse validation CLI options and return the argparse.Namespace.

    Also normalizes a few options: checks the dataset YAML, forces --save-json
    for coco.yaml, and forces --save-txt when --save-hybrid is set.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    parser.add_argument('--task', default='val', help='train, val, test, speed or study')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    parser.add_argument('--min-items', type=int, default=0, help='Experimental')
    opt = parser.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json |= opt.data.endswith('coco.yaml')  # COCO datasets always get a JSON results file
    opt.save_txt |= opt.save_hybrid  # hybrid labels require txt output
    print_args(vars(opt))
    return opt
352
+
353
+
354
def main(opt):
    """Dispatch on opt.task: normal validation, speed benchmark, or speed-vs-mAP study.

    Note: the benchmark branches intentionally mutate ``opt`` attributes inside
    the loops (``for opt.weights in weights`` / ``for opt.imgsz in x``) so each
    run(**vars(opt)) call picks up the current loop value.
    """
    #check_requirements(exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))

    else:
        weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
        opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
        if opt.task == 'speed':  # speed benchmarks
            # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt...
            opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
            for opt.weights in weights:
                run(**vars(opt), plots=False)

        elif opt.task == 'study':  # speed vs mAP benchmarks
            # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt...
            for opt.weights in weights:
                f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
                x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
                for opt.imgsz in x:  # img-size
                    LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                    r, _, t = run(**vars(opt), plots=False)
                    y.append(r + t)  # results and times
                np.savetxt(f, y, fmt='%10.4g')  # save
            os.system('zip -r study.zip study_*.txt')
            plot_val_study(x=x)  # plot
385
+
386
+
387
+ if __name__ == "__main__":
388
+ opt = parse_opt()
389
+ main(opt)
val_dual.py ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ import torch
9
+ from tqdm import tqdm
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[0] # YOLO root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
16
+
17
+ from models.common import DetectMultiBackend
18
+ from utils.callbacks import Callbacks
19
+ from utils.dataloaders import create_dataloader
20
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
21
+ check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
22
+ print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
23
+ from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
24
+ from utils.plots import output_to_target, plot_images, plot_val_study
25
+ from utils.torch_utils import select_device, smart_inference_mode
26
+
27
+
28
def save_one_txt(predn, save_conf, shape, file):
    """Append one image's predictions to a YOLO-format label file.

    Args:
        predn: tensor[N, 6] of (x1, y1, x2, y2, conf, cls) in native image space.
        save_conf: if True, append the confidence as a trailing column.
        shape: (height, width) of the original image, used for normalization.
        file: destination *.txt path (opened in append mode).
    """
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    # Open the file once for all rows (the original reopened it per prediction)
    with open(file, 'a') as f:
        for *xyxy, conf, cls in predn.tolist():
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
            f.write(('%g ' * len(line)).rstrip() % line + '\n')
36
+
37
+
38
def save_one_json(predn, jdict, path, class_map):
    """Append one image's predictions to *jdict* in COCO detection format.

    Each appended entry looks like:
    {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    stem = path.stem
    image_id = int(stem) if stem.isnumeric() else stem
    boxes = xyxy2xywh(predn[:, :4])  # xywh
    boxes[:, :2] -= boxes[:, 2:] / 2  # xy center to top-left corner
    for det, bbox in zip(predn.tolist(), boxes.tolist()):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(det[5])],
            'bbox': [round(coord, 3) for coord in bbox],
            'score': round(det[4], 5)})
49
+
50
+
51
def process_batch(detections, labels, iouv):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])  # MxN pairwise IoU (labels x detections)
    correct_class = labels[:, 0:1] == detections[:, 5]  # MxN class-agreement mask (broadcast)
    for i in range(len(iouv)):  # one pass per IoU threshold
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                # Greedy 1:1 assignment: sort by IoU desc, then keep each detection
                # and each label at most once (np.unique keeps the first occurrence)
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True  # mark matched detections as TP at this level
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
74
+
75
+
76
@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.7,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        min_items=0,  # Experimental
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
):
    """
    Validate a dual-head YOLO model on a dataset and compute detection metrics.

    Returns:
        ((mp, mr, map50, map, *val_losses), per-class mAP array, speed tuple in ms/image)
    NOTE(review): the raw model output is indexed as preds[1] / preds[0][1] —
    presumably the lead head of the aux/lead dual-head layout; confirm against models.yolo.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py with an in-memory model
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly: load weights from disk
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       min_items=min_items,  # BUG FIX: was `opt.min_items` — NameError when run() is called as a function
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times (pre-process, inference, NMS)
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)

        # Loss
        if compute_loss:
            preds = preds[1]  # lead-head predictions when training outputs are returned
            #train_out = train_out[1]
            #loss += compute_loss(train_out, targets)[1]  # box, obj, cls
        else:
            preds = preds[0][1]

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det)

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements('pycocotools')
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            coco_eval = COCOeval(anno, pred, 'bbox')  # renamed from `eval` to stop shadowing the builtin
            if is_coco:
                coco_eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()
            map, map50 = coco_eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
323
+
324
+
325
def parse_opt():
    """Parse command-line options for standalone validation and return the namespace."""
    ap = argparse.ArgumentParser()
    # Data / model selection
    ap.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    ap.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)')
    ap.add_argument('--batch-size', type=int, default=32, help='batch size')
    ap.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    # Thresholds
    ap.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    ap.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold')
    ap.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    # Runtime
    ap.add_argument('--task', default='val', help='train, val, test, speed or study')
    ap.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    ap.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    ap.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    ap.add_argument('--augment', action='store_true', help='augmented inference')
    ap.add_argument('--verbose', action='store_true', help='report mAP by class')
    # Output
    ap.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    ap.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    ap.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    ap.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    ap.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    ap.add_argument('--name', default='exp', help='save to project/name')
    ap.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    # Backend
    ap.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    ap.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    ap.add_argument('--min-items', type=int, default=0, help='Experimental')
    opt = ap.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json |= opt.data.endswith('coco.yaml')  # COCO data implies JSON output
    opt.save_txt |= opt.save_hybrid  # hybrid labels require txt output
    print_args(vars(opt))
    return opt
356
+
357
+
358
def main(opt):
    """Dispatch validation, speed benchmark, or speed-vs-mAP study based on ``opt.task``."""
    #check_requirements(exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))
        return

    # Benchmark modes: iterate over every weights file given
    weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
    opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
    if opt.task == 'speed':  # speed benchmarks
        # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt...
        opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
        for opt.weights in weights:
            run(**vars(opt), plots=False)

    elif opt.task == 'study':  # speed vs mAP benchmarks
        # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt...
        for opt.weights in weights:
            f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
            x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
            for opt.imgsz in x:  # img-size
                LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                r, _, t = run(**vars(opt), plots=False)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_val_study(x=x)  # plot
389
+
390
+
391
+ if __name__ == "__main__":
392
+ opt = parse_opt()
393
+ main(opt)
val_triple.py ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import json
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+ import torch
9
+ from tqdm import tqdm
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[0] # YOLO root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
16
+
17
+ from models.common import DetectMultiBackend
18
+ from utils.callbacks import Callbacks
19
+ from utils.dataloaders import create_dataloader
20
+ from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements,
21
+ check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression,
22
+ print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
23
+ from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
24
+ from utils.plots import output_to_target, plot_images, plot_val_study
25
+ from utils.torch_utils import select_device, smart_inference_mode
26
+
27
+
28
def save_one_txt(predn, save_conf, shape, file):
    """Append one image's detections to *file* as normalized YOLO label lines."""
    gain = torch.tensor(shape)[[1, 0, 1, 0]]  # whwh gain for normalizing to 0-1
    for *box, conf, cls in predn.tolist():
        norm_xywh = (xyxy2xywh(torch.tensor(box).view(1, 4)) / gain).view(-1).tolist()  # normalized xywh
        row = (cls, *norm_xywh, conf) if save_conf else (cls, *norm_xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(row)).rstrip() % row + '\n')
36
+
37
+
38
def save_one_json(predn, jdict, path, class_map):
    """Append one image's detections to *jdict* as COCO-format dicts.

    Each entry looks like:
    {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    stem = path.stem
    image_id = int(stem) if stem.isnumeric() else stem
    boxes = xyxy2xywh(predn[:, :4])  # xyxy -> xywh (center-based)
    boxes[:, :2] -= boxes[:, 2:] / 2  # xy center to top-left corner (COCO convention)
    for det, box in zip(predn.tolist(), boxes.tolist()):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(det[5])],
            'bbox': [round(v, 3) for v in box],
            'score': round(det[4], 5)})
49
+
50
+
51
def process_batch(detections, labels, iouv):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])  # (M, N) pairwise IoU between labels and detections
    correct_class = labels[:, 0:1] == detections[:, 5]  # (M, N) mask: label class matches detection class
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                # Greedy dedup: sort by IoU descending, then keep each detection's
                # best match, then each label's best match — order matters here.
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True  # mark surviving detections correct at this IoU level
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
74
+
75
+
76
@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.7,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        min_items=0,  # Experimental
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        callbacks=Callbacks(),
        compute_loss=None,
):
    """
    Validate a triple-head YOLO model on a dataset and compute detection metrics.

    Returns:
        ((mp, mr, map50, map, *val_losses), per-class mAP array, speed tuple in ms/image)
    NOTE(review): the raw model output is indexed as [2] — presumably the third
    (final) head of the triple-head layout; confirm against models.yolo.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py with an in-memory model
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
    else:  # called directly: load weights from disk
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    #is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       min_items=min_items,  # BUG FIX: was `opt.min_items` — NameError when run() is called as a function
                                       prefix=colorstr(f'{task}: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95')
    tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    dt = Profile(), Profile(), Profile()  # profiling times (pre-process, inference, NMS)
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
        callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None)
            preds = preds[2]  # third-head detections
            # BUG FIX: train_out is None when compute_loss is falsy (the standalone
            # validation path); indexing it unconditionally raised TypeError.
            if train_out is not None:
                train_out = train_out[2]

        # Loss
        #if compute_loss:
        #    loss += compute_loss(train_out, targets)[2]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det)

        # Metrics
        for si, pred in enumerate(preds):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct = process_batch(predn, labelsn, iouv)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
            callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)  # labels
            plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names)  # pred

        callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds)

    # Compute metrics
    stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
    nt = np.bincount(stats[3].astype(int), minlength=nc)  # number of targets per class

    # Print results
    pf = '%22s' + '%11i' * 2 + '%11.3g' * 4  # print format
    LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
    if nt.sum() == 0:
        LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
    if not training:
        shape = (batch_size, 3, imgsz, imgsz)
        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json')  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            check_requirements('pycocotools')
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            coco_eval = COCOeval(anno, pred, 'bbox')  # renamed from `eval` to stop shadowing the builtin
            if is_coco:
                coco_eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files]  # image IDs to evaluate
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()
            map, map50 = coco_eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            LOGGER.info(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
321
+
322
+
323
def parse_opt():
    """Parse command-line options for standalone validation and return the namespace."""
    ap = argparse.ArgumentParser()
    # Data / model selection
    ap.add_argument('--data', type=str, default=ROOT / 'data/coco.yaml', help='dataset.yaml path')
    ap.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolo.pt', help='model path(s)')
    ap.add_argument('--batch-size', type=int, default=32, help='batch size')
    ap.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
    # Thresholds
    ap.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
    ap.add_argument('--iou-thres', type=float, default=0.7, help='NMS IoU threshold')
    ap.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
    # Runtime
    ap.add_argument('--task', default='val', help='train, val, test, speed or study')
    ap.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    ap.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
    ap.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    ap.add_argument('--augment', action='store_true', help='augmented inference')
    ap.add_argument('--verbose', action='store_true', help='report mAP by class')
    # Output
    ap.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    ap.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    ap.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    ap.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
    ap.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
    ap.add_argument('--name', default='exp', help='save to project/name')
    ap.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    # Backend
    ap.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    ap.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    ap.add_argument('--min-items', type=int, default=0, help='Experimental')
    opt = ap.parse_args()
    opt.data = check_yaml(opt.data)  # check YAML
    opt.save_json |= opt.data.endswith('coco.yaml')  # COCO data implies JSON output
    opt.save_txt |= opt.save_hybrid  # hybrid labels require txt output
    print_args(vars(opt))
    return opt
354
+
355
+
356
def main(opt):
    """Dispatch validation, speed benchmark, or speed-vs-mAP study based on ``opt.task``."""
    #check_requirements(exclude=('tensorboard', 'thop'))

    if opt.task in ('train', 'val', 'test'):  # run normally
        if opt.conf_thres > 0.001:  # https://github.com/ultralytics/yolov5/issues/1466
            LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
        if opt.save_hybrid:
            LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone')
        run(**vars(opt))
        return

    # Benchmark modes: iterate over every weights file given
    weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
    opt.half = torch.cuda.is_available() and opt.device != 'cpu'  # FP16 for fastest results
    if opt.task == 'speed':  # speed benchmarks
        # python val.py --task speed --data coco.yaml --batch 1 --weights yolo.pt...
        opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
        for opt.weights in weights:
            run(**vars(opt), plots=False)

    elif opt.task == 'study':  # speed vs mAP benchmarks
        # python val.py --task study --data coco.yaml --iou 0.7 --weights yolo.pt...
        for opt.weights in weights:
            f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt'  # filename to save to
            x, y = list(range(256, 1536 + 128, 128)), []  # x axis (image sizes), y axis
            for opt.imgsz in x:  # img-size
                LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
                r, _, t = run(**vars(opt), plots=False)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_val_study(x=x)  # plot
389
+ if __name__ == "__main__":
390
+ opt = parse_opt()
391
+ main(opt)
view.py ADDED
@@ -0,0 +1 @@
 
 
1
+