glenn-jocher committed
Commit
4821d07
•
1 Parent(s): d3e7778

Increment train, test, detect runs/ (#1322)

* Increment train, test, detect runs/

* Update ci-testing.yml

* inference/images to data/images

* move images

* runs/exp to runs/train/exp

* update 'results saved to %s' str

.github/workflows/ci-testing.yml CHANGED
@@ -66,10 +66,10 @@ jobs:
66
  python train.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di
67
  # detect
68
  python detect.py --weights weights/${{ matrix.model }}.pt --device $di
69
- python detect.py --weights runs/exp0/weights/last.pt --device $di
70
  # test
71
  python test.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --device $di
72
- python test.py --img 256 --batch 8 --weights runs/exp0/weights/last.pt --device $di
73
 
74
  python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect
75
  python models/export.py --img 256 --batch 1 --weights weights/${{ matrix.model }}.pt # export
 
66
  python train.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di
67
  # detect
68
  python detect.py --weights weights/${{ matrix.model }}.pt --device $di
69
+ python detect.py --weights runs/train/exp0/weights/last.pt --device $di
70
  # test
71
  python test.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --device $di
72
+ python test.py --img 256 --batch 8 --weights runs/train/exp0/weights/last.pt --device $di
73
 
74
  python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect
75
  python models/export.py --img 256 --batch 1 --weights weights/${{ matrix.model }}.pt # export
.gitignore CHANGED
@@ -26,8 +26,8 @@
26
  storage.googleapis.com
27
  runs/*
28
  data/*
29
- !data/samples/zidane.jpg
30
- !data/samples/bus.jpg
31
  !data/coco.names
32
  !data/coco_paper.names
33
  !data/coco.data
 
26
  storage.googleapis.com
27
  runs/*
28
  data/*
29
+ !data/images/zidane.jpg
30
+ !data/images/bus.jpg
31
  !data/coco.names
32
  !data/coco_paper.names
33
  !data/coco.data
Dockerfile CHANGED
@@ -46,7 +46,7 @@ COPY . /usr/src/app
46
  # sudo docker commit 092b16b25c5b usr/resume && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco --entrypoint=sh usr/resume
47
 
48
  # Send weights to GCP
49
- # python -c "from utils.general import *; strip_optimizer('runs/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
50
 
51
  # Clean up
52
  # docker system prune -a --volumes
 
46
  # sudo docker commit 092b16b25c5b usr/resume && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco --entrypoint=sh usr/resume
47
 
48
  # Send weights to GCP
49
+ # python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
50
 
51
  # Clean up
52
  # docker system prune -a --volumes
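For readers unfamiliar with the `strip_optimizer` call in the updated Dockerfile comment: it finalizes a checkpoint by discarding training state so only the model weights are shipped to GCP. The sketch below is an assumption-based reconstruction inferred from its usage here, not the exact body in `utils/general.py`:

```python
import torch


def strip_optimizer(f='weights/best.pt', s=''):
    # Rough sketch (assumed behavior): drop optimizer/training state from a
    # checkpoint and keep inference-only FP16 weights, shrinking the file.
    x = torch.load(f, map_location=torch.device('cpu'))
    x['optimizer'] = None  # remove optimizer state
    x['model'].half()  # FP16 halves file size
    for p in x['model'].parameters():
        p.requires_grad = False  # inference only
    torch.save(x, s or f)  # save to s if given, else overwrite f
```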
README.md CHANGED
@@ -70,7 +70,7 @@ YOLOv5 may be run in any of the following up-to-date verified environments (with
70
 
71
  ## Inference
72
 
73
- detect.py runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `inference/output`.
74
  ```bash
75
  $ python detect.py --source 0 # webcam
76
  file.jpg # image
@@ -82,20 +82,20 @@ $ python detect.py --source 0 # webcam
82
  http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream
83
  ```
84
 
85
- To run inference on example images in `inference/images`:
86
  ```bash
87
- $ python detect.py --source inference/images --weights yolov5s.pt --conf 0.25
88
 
89
- Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, output='inference/output', save_conf=False, save_txt=False, source='inference/images', update=False, view_img=False, weights='yolov5s.pt')
90
  Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16160MB)
91
 
92
 Downloading https://github.com/ultralytics/yolov5/releases/download/v3.0/yolov5s.pt to yolov5s.pt... 100%|██████████████| 14.5M/14.5M [00:00<00:00, 21.3MB/s]
93
 
94
  Fusing layers...
95
  Model Summary: 140 layers, 7.45958e+06 parameters, 0 gradients
96
- image 1/2 yolov5/inference/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.013s)
97
- image 2/2 yolov5/inference/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.013s)
98
- Results saved to yolov5/inference/output
99
  Done. (0.124s)
100
  ```
101
  <img src="https://user-images.githubusercontent.com/26833433/97107365-685a8d80-16c7-11eb-8c2e-83aac701d8b9.jpeg" width="500">
 
70
 
71
  ## Inference
72
 
73
+ detect.py runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
74
  ```bash
75
  $ python detect.py --source 0 # webcam
76
  file.jpg # image
 
82
  http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream
83
  ```
84
 
85
+ To run inference on example images in `data/images`:
86
  ```bash
87
+ $ python detect.py --source data/images --weights yolov5s.pt --conf 0.25
88
 
89
+ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, output='runs/detect', save_conf=False, save_txt=False, source='data/images', update=False, view_img=False, weights='yolov5s.pt')
90
  Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16160MB)
91
 
92
 Downloading https://github.com/ultralytics/yolov5/releases/download/v3.0/yolov5s.pt to yolov5s.pt... 100%|██████████████| 14.5M/14.5M [00:00<00:00, 21.3MB/s]
93
 
94
  Fusing layers...
95
  Model Summary: 140 layers, 7.45958e+06 parameters, 0 gradients
96
+ image 1/2 data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.013s)
97
+ image 2/2 data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.013s)
98
+ Results saved to runs/detect/exp0
99
  Done. (0.124s)
100
  ```
101
  <img src="https://user-images.githubusercontent.com/26833433/97107365-685a8d80-16c7-11eb-8c2e-83aac701d8b9.jpeg" width="500">
{inference → data}/images/bus.jpg RENAMED
File without changes
{inference → data}/images/zidane.jpg RENAMED
File without changes
detect.py CHANGED
@@ -1,6 +1,5 @@
1
  import argparse
2
  import os
3
- import shutil
4
  import time
5
  from pathlib import Path
6
 
@@ -11,23 +10,25 @@ from numpy import random
11
 
12
  from models.experimental import attempt_load
13
  from utils.datasets import LoadStreams, LoadImages
14
- from utils.general import (
15
- check_img_size, non_max_suppression, apply_classifier, scale_coords,
16
- xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
17
  from utils.torch_utils import select_device, load_classifier, time_synchronized
18
 
19
 
20
  def detect(save_img=False):
21
- out, source, weights, view_img, save_txt, imgsz = \
22
- opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
23
  webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
24
 
25
  # Initialize
26
  set_logging()
27
  device = select_device(opt.device)
28
- if os.path.exists(out): # output dir
29
- shutil.rmtree(out) # delete dir
30
- os.makedirs(out) # make new dir
31
  half = device.type != 'cpu' # half precision only supported on CUDA
32
 
33
  # Load model
@@ -83,12 +84,12 @@ def detect(save_img=False):
83
  # Process detections
84
  for i, det in enumerate(pred): # detections per image
85
  if webcam: # batch_size >= 1
86
- p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
87
  else:
88
- p, s, im0 = path, '', im0s
89
 
90
- save_path = str(Path(out) / Path(p).name)
91
- txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
92
  s += '%gx%g ' % img.shape[2:] # print string
93
  gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
94
  if det is not None and len(det):
@@ -104,7 +105,7 @@ def detect(save_img=False):
104
  for *xyxy, conf, cls in reversed(det):
105
  if save_txt: # Write to file
106
  xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
107
- line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh) # label format
108
  with open(txt_path + '.txt', 'a') as f:
109
  f.write(('%g ' * len(line) + '\n') % line)
110
 
@@ -139,7 +140,7 @@ def detect(save_img=False):
139
  vid_writer.write(im0)
140
 
141
  if save_txt or save_img:
142
- print('Results saved to %s' % Path(out))
143
 
144
  print('Done. (%.3fs)' % (time.time() - t0))
145
 
@@ -147,15 +148,16 @@ def detect(save_img=False):
147
  if __name__ == '__main__':
148
  parser = argparse.ArgumentParser()
149
  parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
150
- parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam
151
  parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
152
  parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
153
  parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
154
  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
155
  parser.add_argument('--view-img', action='store_true', help='display results')
156
- parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
157
  parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
158
- parser.add_argument('--save-dir', type=str, default='inference/output', help='directory to save results')
 
159
  parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
160
  parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
161
  parser.add_argument('--augment', action='store_true', help='augmented inference')
 
1
  import argparse
2
  import os
 
3
  import time
4
  from pathlib import Path
5
 
 
10
 
11
  from models.experimental import attempt_load
12
  from utils.datasets import LoadStreams, LoadImages
13
+ from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, \
14
+ plot_one_box, strip_optimizer, set_logging, increment_dir
 
15
  from utils.torch_utils import select_device, load_classifier, time_synchronized
16
 
17
 
18
  def detect(save_img=False):
19
+ save_dir, source, weights, view_img, save_txt, imgsz = \
20
+ Path(opt.save_dir), opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
21
  webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
22
 
23
+ # Directories
24
+ if save_dir == Path('runs/detect'): # if default
25
+ os.makedirs('runs/detect', exist_ok=True) # make base
26
+ save_dir = Path(increment_dir(save_dir / 'exp', opt.name)) # increment run
27
+ os.makedirs(save_dir / 'labels' if save_txt else save_dir, exist_ok=True) # make new dir
28
+
29
  # Initialize
30
  set_logging()
31
  device = select_device(opt.device)
32
  half = device.type != 'cpu' # half precision only supported on CUDA
33
 
34
  # Load model
 
84
  # Process detections
85
  for i, det in enumerate(pred): # detections per image
86
  if webcam: # batch_size >= 1
87
+ p, s, im0 = Path(path[i]), '%g: ' % i, im0s[i].copy()
88
  else:
89
+ p, s, im0 = Path(path), '', im0s
90
 
91
+ save_path = str(save_dir / p.name)
92
+ txt_path = str(save_dir / 'labels' / p.stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
93
  s += '%gx%g ' % img.shape[2:] # print string
94
  gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
95
  if det is not None and len(det):
 
105
  for *xyxy, conf, cls in reversed(det):
106
  if save_txt: # Write to file
107
  xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
108
+ line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
109
  with open(txt_path + '.txt', 'a') as f:
110
  f.write(('%g ' * len(line) + '\n') % line)
111
 
 
140
  vid_writer.write(im0)
141
 
142
  if save_txt or save_img:
143
+ print('Results saved to %s' % save_dir)
144
 
145
  print('Done. (%.3fs)' % (time.time() - t0))
146
 
 
148
  if __name__ == '__main__':
149
  parser = argparse.ArgumentParser()
150
  parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
151
+ parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
152
  parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
153
  parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
154
  parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
155
  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
156
  parser.add_argument('--view-img', action='store_true', help='display results')
157
+ parser.add_argument('--save-txt', action='store_false', help='save results to *.txt')
158
  parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
159
+ parser.add_argument('--save-dir', type=str, default='runs/detect', help='directory to save results')
160
+ parser.add_argument('--name', default='', help='name to append to --save-dir: i.e. runs/{N} -> runs/{N}_{name}')
161
  parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
162
  parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
163
  parser.add_argument('--augment', action='store_true', help='augmented inference')
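Condensed from the hunks above, this is how detect.py now chooses its output directory: with the default `--save-dir runs/detect`, each run lands in an auto-incremented `exp{N}` subfolder (with a `labels/` subfolder when `--save-txt` is set). A minimal standalone sketch, assuming the repo's `utils.general.increment_dir` is importable; the stand-in variables replace the parsed `opt` fields:

```python
import os
from pathlib import Path

from utils.general import increment_dir  # repo helper, shown in utils/general.py below

# Stand-ins for opt.save_dir, opt.name, opt.save_txt (argparse defaults)
opt_save_dir, opt_name, save_txt = 'runs/detect', '', False

save_dir = Path(opt_save_dir)
if save_dir == Path('runs/detect'):  # default --save-dir: auto-increment a new run
    os.makedirs('runs/detect', exist_ok=True)  # make base
    save_dir = Path(increment_dir(save_dir / 'exp', opt_name))  # runs/detect/exp0, exp1, ...
os.makedirs(save_dir / 'labels' if save_txt else save_dir, exist_ok=True)  # make run dir
print('Results saved to %s' % save_dir)
```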
hubconf.py CHANGED
@@ -113,6 +113,6 @@ if __name__ == '__main__':
113
  # Verify inference
114
  from PIL import Image
115
 
116
- img = Image.open('inference/images/zidane.jpg')
117
  y = model(img)
118
  print(y[0].shape)
 
113
  # Verify inference
114
  from PIL import Image
115
 
116
+ img = Image.open('data/images/zidane.jpg')
117
  y = model(img)
118
  print(y[0].shape)
sotabench.py DELETED
@@ -1,307 +0,0 @@
1
- import argparse
2
- import glob
3
- import os
4
- import shutil
5
- from pathlib import Path
6
-
7
- import numpy as np
8
- import torch
9
- import yaml
10
- from sotabencheval.object_detection import COCOEvaluator
11
- from sotabencheval.utils import is_server
12
- from tqdm import tqdm
13
-
14
- from models.experimental import attempt_load
15
- from utils.datasets import create_dataloader
16
- from utils.general import (
17
- coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression, scale_coords,
18
- xyxy2xywh, clip_coords, set_logging)
19
- from utils.torch_utils import select_device, time_synchronized
20
-
21
- DATA_ROOT = './.data/vision/coco' if is_server() else '../coco' # sotabench data dir
22
-
23
-
24
- def test(data,
25
- weights=None,
26
- batch_size=16,
27
- imgsz=640,
28
- conf_thres=0.001,
29
- iou_thres=0.6, # for NMS
30
- save_json=False,
31
- single_cls=False,
32
- augment=False,
33
- verbose=False,
34
- model=None,
35
- dataloader=None,
36
- save_dir='',
37
- merge=False,
38
- save_txt=False):
39
- # Initialize/load model and set device
40
- training = model is not None
41
- if training: # called by train.py
42
- device = next(model.parameters()).device # get model device
43
-
44
- else: # called directly
45
- set_logging()
46
- device = select_device(opt.device, batch_size=batch_size)
47
- merge, save_txt = opt.merge, opt.save_txt # use Merge NMS, save *.txt labels
48
- if save_txt:
49
- out = Path('inference/output')
50
- if os.path.exists(out):
51
- shutil.rmtree(out) # delete output folder
52
- os.makedirs(out) # make new output folder
53
-
54
- # Remove previous
55
- for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
56
- os.remove(f)
57
-
58
- # Load model
59
- model = attempt_load(weights, map_location=device) # load FP32 model
60
- imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
61
-
62
- # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
63
- # if device.type != 'cpu' and torch.cuda.device_count() > 1:
64
- # model = nn.DataParallel(model)
65
-
66
- # Half
67
- half = device.type != 'cpu' # half precision only supported on CUDA
68
- if half:
69
- model.half()
70
-
71
- # Configure
72
- model.eval()
73
- with open(data) as f:
74
- data = yaml.load(f, Loader=yaml.FullLoader) # model dict
75
- check_dataset(data) # check
76
- nc = 1 if single_cls else int(data['nc']) # number of classes
77
- iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
78
- niou = iouv.numel()
79
-
80
- # Dataloader
81
- if not training:
82
- img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
83
- _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
84
- path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
85
- dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt,
86
- hyp=None, augment=False, cache=True, pad=0.5, rect=True)[0]
87
-
88
- seen = 0
89
- names = model.names if hasattr(model, 'names') else model.module.names
90
- coco91class = coco80_to_coco91_class()
91
- s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
92
- p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
93
- loss = torch.zeros(3, device=device)
94
- jdict, stats, ap, ap_class = [], [], [], []
95
- evaluator = COCOEvaluator(root=DATA_ROOT, model_name=opt.weights.replace('.pt', ''))
96
- for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
97
- img = img.to(device, non_blocking=True)
98
- img = img.half() if half else img.float() # uint8 to fp16/32
99
- img /= 255.0 # 0 - 255 to 0.0 - 1.0
100
- targets = targets.to(device)
101
- nb, _, height, width = img.shape # batch size, channels, height, width
102
- whwh = torch.Tensor([width, height, width, height]).to(device)
103
-
104
- # Disable gradients
105
- with torch.no_grad():
106
- # Run model
107
- t = time_synchronized()
108
- inf_out, train_out = model(img, augment=augment) # inference and training outputs
109
- t0 += time_synchronized() - t
110
-
111
- # Compute loss
112
- if training: # if model has loss hyperparameters
113
- loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
114
-
115
- # Run NMS
116
- t = time_synchronized()
117
- output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, merge=merge)
118
- t1 += time_synchronized() - t
119
-
120
- # Statistics per image
121
- for si, pred in enumerate(output):
122
- labels = targets[targets[:, 0] == si, 1:]
123
- nl = len(labels)
124
- tcls = labels[:, 0].tolist() if nl else [] # target class
125
- seen += 1
126
-
127
- if pred is None:
128
- if nl:
129
- stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
130
- continue
131
-
132
- # Append to text file
133
- if save_txt:
134
- gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
135
- x = pred.clone()
136
- x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1]) # to original
137
- for *xyxy, conf, cls in x:
138
- xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
139
- with open(str(out / Path(paths[si]).stem) + '.txt', 'a') as f:
140
- f.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
141
-
142
- # Clip boxes to image bounds
143
- clip_coords(pred, (height, width))
144
-
145
- # Append to pycocotools JSON dictionary
146
- if save_json:
147
- # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
148
- image_id = Path(paths[si]).stem
149
- box = pred[:, :4].clone() # xyxy
150
- scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
151
- box = xyxy2xywh(box) # xywh
152
- box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
153
- for p, b in zip(pred.tolist(), box.tolist()):
154
- result = {'image_id': int(image_id) if image_id.isnumeric() else image_id,
155
- 'category_id': coco91class[int(p[5])],
156
- 'bbox': [round(x, 3) for x in b],
157
- 'score': round(p[4], 5)}
158
- jdict.append(result)
159
-
160
- #evaluator.add([result])
161
- #if evaluator.cache_exists:
162
- # break
163
-
164
- # # Assign all predictions as incorrect
165
- # correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
166
- # if nl:
167
- # detected = [] # target indices
168
- # tcls_tensor = labels[:, 0]
169
- #
170
- # # target boxes
171
- # tbox = xywh2xyxy(labels[:, 1:5]) * whwh
172
- #
173
- # # Per target class
174
- # for cls in torch.unique(tcls_tensor):
175
- # ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
176
- # pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
177
- #
178
- # # Search for detections
179
- # if pi.shape[0]:
180
- # # Prediction to target ious
181
- # ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
182
- #
183
- # # Append detections
184
- # detected_set = set()
185
- # for j in (ious > iouv[0]).nonzero(as_tuple=False):
186
- # d = ti[i[j]] # detected target
187
- # if d.item() not in detected_set:
188
- # detected_set.add(d.item())
189
- # detected.append(d)
190
- # correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
191
- # if len(detected) == nl: # all targets already located in image
192
- # break
193
- #
194
- # # Append statistics (correct, conf, pcls, tcls)
195
- # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
196
-
197
- # # Plot images
198
- # if batch_i < 1:
199
- # f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i) # filename
200
- # plot_images(img, targets, paths, str(f), names) # ground truth
201
- # f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
202
- # plot_images(img, output_to_target(output, width, height), paths, str(f), names) # predictions
203
-
204
- evaluator.add(jdict)
205
- evaluator.save()
206
-
207
- # # Compute statistics
208
- # stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
209
- # if len(stats) and stats[0].any():
210
- # p, r, ap, f1, ap_class = ap_per_class(*stats)
211
- # p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
212
- # mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
213
- # nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
214
- # else:
215
- # nt = torch.zeros(1)
216
- #
217
- # # Print results
218
- # pf = '%20s' + '%12.3g' * 6 # print format
219
- # print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
220
- #
221
- # # Print results per class
222
- # if verbose and nc > 1 and len(stats):
223
- # for i, c in enumerate(ap_class):
224
- # print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
225
- #
226
- # # Print speeds
227
- # t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
228
- # if not training:
229
- # print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
230
- #
231
- # # Save JSON
232
- # if save_json and len(jdict):
233
- # f = 'detections_val2017_%s_results.json' % \
234
- # (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '') # filename
235
- # print('\nCOCO mAP with pycocotools... saving %s...' % f)
236
- # with open(f, 'w') as file:
237
- # json.dump(jdict, file)
238
- #
239
- # try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
240
- # from pycocotools.coco import COCO
241
- # from pycocotools.cocoeval import COCOeval
242
- #
243
- # imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
244
- # cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
245
- # cocoDt = cocoGt.loadRes(f) # initialize COCO pred api
246
- # cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
247
- # cocoEval.params.imgIds = imgIds # image IDs to evaluate
248
- # cocoEval.evaluate()
249
- # cocoEval.accumulate()
250
- # cocoEval.summarize()
251
- # map, map50 = cocoEval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
252
- # except Exception as e:
253
- # print('ERROR: pycocotools unable to run: %s' % e)
254
- #
255
- # # Return results
256
- # model.float() # for training
257
- # maps = np.zeros(nc) + map
258
- # for i, c in enumerate(ap_class):
259
- # maps[c] = ap[i]
260
- # return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
261
-
262
-
263
- if __name__ == '__main__':
264
- parser = argparse.ArgumentParser(prog='test.py')
265
- parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
266
- parser.add_argument('--data', type=str, default='data/coco.yaml', help='*.data path')
267
- parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
268
- parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
269
- parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
270
- parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
271
- parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
272
- parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
273
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
274
- parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
275
- parser.add_argument('--augment', action='store_true', help='augmented inference')
276
- parser.add_argument('--merge', action='store_true', help='use Merge NMS')
277
- parser.add_argument('--verbose', action='store_true', help='report mAP by class')
278
- parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
279
- opt = parser.parse_args()
280
- opt.save_json |= opt.data.endswith('coco.yaml')
281
- opt.data = check_file(opt.data) # check file
282
- print(opt)
283
-
284
- if opt.task in ['val', 'test']: # run normally
285
- test(opt.data,
286
- opt.weights,
287
- opt.batch_size,
288
- opt.img_size,
289
- opt.conf_thres,
290
- opt.iou_thres,
291
- opt.save_json,
292
- opt.single_cls,
293
- opt.augment,
294
- opt.verbose)
295
-
296
- elif opt.task == 'study': # run over a range of settings and save/plot
297
- for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
298
- f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
299
- x = list(range(320, 800, 64)) # x axis
300
- y = [] # y axis
301
- for i in x: # img-size
302
- print('\nRunning %s point %s...' % (f, i))
303
- r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json)
304
- y.append(r + t) # results and times
305
- np.savetxt(f, y, fmt='%10.4g') # save
306
- os.system('zip -r study.zip study_*.txt')
307
- # utils.general.plot_study_txt(f, x) # plot
test.py CHANGED
@@ -2,7 +2,6 @@ import argparse
2
  import glob
3
  import json
4
  import os
5
- import shutil
6
  from pathlib import Path
7
 
8
  import numpy as np
@@ -12,9 +11,9 @@ from tqdm import tqdm
12
 
13
  from models.experimental import attempt_load
14
  from utils.datasets import create_dataloader
15
- from utils.general import (
16
- coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression, scale_coords,
17
- xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class, set_logging)
18
  from utils.torch_utils import select_device, time_synchronized
19
 
20
 
@@ -46,16 +45,11 @@ def test(data,
46
  device = select_device(opt.device, batch_size=batch_size)
47
  save_txt = opt.save_txt # save *.txt labels
48
 
49
- # Remove previous
50
- if os.path.exists(save_dir):
51
- shutil.rmtree(save_dir) # delete dir
52
- os.makedirs(save_dir) # make new dir
53
-
54
- if save_txt:
55
- out = save_dir / 'autolabels'
56
- if os.path.exists(out):
57
- shutil.rmtree(out) # delete dir
58
- os.makedirs(out) # make new dir
59
 
60
  # Load model
61
  model = attempt_load(weights, map_location=device) # load FP32 model
@@ -144,8 +138,8 @@ def test(data,
144
  x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1]) # to original
145
  for *xyxy, conf, cls in x:
146
  xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
147
- line = (cls, conf, *xywh) if save_conf else (cls, *xywh) # label format
148
- with open(str(out / Path(paths[si]).stem) + '.txt', 'a') as f:
149
  f.write(('%g ' * len(line) + '\n') % line)
150
 
151
  # W&B logging
@@ -268,6 +262,7 @@ def test(data,
268
  print('ERROR: pycocotools unable to run: %s' % e)
269
 
270
  # Return results
 
271
  model.float() # for training
272
  maps = np.zeros(nc) + map
273
  for i, c in enumerate(ap_class):
@@ -292,6 +287,7 @@ if __name__ == '__main__':
292
  parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
293
  parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
294
  parser.add_argument('--save-dir', type=str, default='runs/test', help='directory to save results')
 
295
  opt = parser.parse_args()
296
  opt.save_json |= opt.data.endswith('coco.yaml')
297
  opt.data = check_file(opt.data) # check file
@@ -313,8 +309,6 @@ if __name__ == '__main__':
313
  save_conf=opt.save_conf,
314
  )
315
 
316
- print('Results saved to %s' % opt.save_dir)
317
-
318
  elif opt.task == 'study': # run over a range of settings and save/plot
319
  for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
320
  f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
 
2
  import glob
3
  import json
4
  import os
 
5
  from pathlib import Path
6
 
7
  import numpy as np
 
11
 
12
  from models.experimental import attempt_load
13
  from utils.datasets import create_dataloader
14
+ from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, \
15
+ non_max_suppression, scale_coords, xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, \
16
+ ap_per_class, set_logging, increment_dir
17
  from utils.torch_utils import select_device, time_synchronized
18
 
19
 
 
45
  device = select_device(opt.device, batch_size=batch_size)
46
  save_txt = opt.save_txt # save *.txt labels
47
 
48
+ # Directories
49
+ if save_dir == Path('runs/test'): # if default
50
+ os.makedirs('runs/test', exist_ok=True) # make base
51
+ save_dir = Path(increment_dir(save_dir / 'exp', opt.name)) # increment run
52
+ os.makedirs(save_dir / 'labels' if save_txt else save_dir, exist_ok=True) # make new dir
53
 
54
  # Load model
55
  model = attempt_load(weights, map_location=device) # load FP32 model
 
138
  x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1]) # to original
139
  for *xyxy, conf, cls in x:
140
  xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
141
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
142
+ with open(str(save_dir / 'labels' / Path(paths[si]).stem) + '.txt', 'a') as f:
143
  f.write(('%g ' * len(line) + '\n') % line)
144
 
145
  # W&B logging
 
262
  print('ERROR: pycocotools unable to run: %s' % e)
263
 
264
  # Return results
265
+ print('Results saved to %s' % save_dir)
266
  model.float() # for training
267
  maps = np.zeros(nc) + map
268
  for i, c in enumerate(ap_class):
 
287
  parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
288
  parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
289
  parser.add_argument('--save-dir', type=str, default='runs/test', help='directory to save results')
290
+ parser.add_argument('--name', default='', help='name to append to --save-dir: i.e. runs/{N} -> runs/{N}_{name}')
291
  opt = parser.parse_args()
292
  opt.save_json |= opt.data.endswith('coco.yaml')
293
  opt.data = check_file(opt.data) # check file
 
309
  save_conf=opt.save_conf,
310
  )
311
 
312
  elif opt.task == 'study': # run over a range of settings and save/plot
313
  for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
314
  f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
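Both detect.py and test.py now write `--save-txt` labels as `class x_center y_center width height [conf]`, with the confidence moved from the second field to the last. Below is a worked example of the normalized-xywh conversion used above, with a hypothetical 640x480 image and one made-up box; `xyxy2xywh` is restated here so the snippet is self-contained:

```python
import torch


def xyxy2xywh(x):
    # Convert [x1, y1, x2, y2] boxes to [x_center, y_center, w, h] (as in utils.general)
    y = x.clone()
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


# Hypothetical 640x480 (w x h) image with one detection
im0_shape = (480, 640, 3)  # h, w, c
gn = torch.tensor(im0_shape)[[1, 0, 1, 0]]  # normalization gain whwh -> [640, 480, 640, 480]
xyxy, conf, cls = [100., 120., 300., 360.], 0.87, 0  # made-up box, confidence, class
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
line = (cls, *xywh, conf)  # new label format: class x y w h [conf]
print(('%g ' * len(line)) % line)  # -> '0 0.3125 0.5 0.3125 0.5 0.87 '
```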
train.py CHANGED
@@ -1,5 +1,6 @@
1
  import argparse
2
  import logging
 
3
  import os
4
  import random
5
  import shutil
@@ -7,7 +8,6 @@ import time
7
  from pathlib import Path
8
  from warnings import warn
9
 
10
- import math
11
  import numpy as np
12
  import torch.distributed as dist
13
  import torch.nn as nn
@@ -404,14 +404,14 @@ if __name__ == '__main__':
404
  parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
405
  parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
406
  parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
407
- parser.add_argument('--name', default='', help='renames experiment folder exp{N} to exp{N}_{name} if supplied')
408
  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
409
  parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
410
  parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
411
  parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
412
  parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
413
  parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
414
- parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
 
415
  parser.add_argument('--log-imgs', type=int, default=10, help='number of images for W&B logging, max 100')
416
  parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
417
 
@@ -428,7 +428,7 @@ if __name__ == '__main__':
428
  # Resume
429
  if opt.resume: # resume an interrupted run
430
  ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
431
- log_dir = Path(ckpt).parent.parent # runs/exp0
432
  assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
433
  with open(log_dir / 'opt.yaml') as f:
434
  opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
@@ -467,14 +467,13 @@ if __name__ == '__main__':
467
  if opt.global_rank in [-1, 0]:
468
  # Tensorboard
469
  logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.logdir}", view at http://localhost:6006/')
470
- tb_writer = SummaryWriter(log_dir=log_dir) # runs/exp0
471
 
472
  # W&B
473
  try:
474
  import wandb
475
 
476
  assert os.environ.get('WANDB_DISABLED') != 'true'
477
- logger.info("Weights & Biases logging enabled, to disable set os.environ['WANDB_DISABLED'] = 'true'")
478
  except (ImportError, AssertionError):
479
  opt.log_imgs = 0
480
  logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
 
1
  import argparse
2
  import logging
3
+ import math
4
  import os
5
  import random
6
  import shutil
 
8
  from pathlib import Path
9
  from warnings import warn
10
 
 
11
  import numpy as np
12
  import torch.distributed as dist
13
  import torch.nn as nn
 
404
  parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
405
  parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
406
  parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
 
407
  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
408
  parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
409
  parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
410
  parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
411
  parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
412
  parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
413
+ parser.add_argument('--logdir', type=str, default='runs/train', help='logging directory')
414
+ parser.add_argument('--name', default='', help='name to append to --save-dir: i.e. runs/{N} -> runs/{N}_{name}')
415
  parser.add_argument('--log-imgs', type=int, default=10, help='number of images for W&B logging, max 100')
416
  parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
417
 
 
428
  # Resume
429
  if opt.resume: # resume an interrupted run
430
  ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
431
+ log_dir = Path(ckpt).parent.parent # runs/train/exp0
432
  assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
433
  with open(log_dir / 'opt.yaml') as f:
434
  opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
 
467
  if opt.global_rank in [-1, 0]:
468
  # Tensorboard
469
  logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.logdir}", view at http://localhost:6006/')
470
+ tb_writer = SummaryWriter(log_dir=log_dir) # runs/train/exp0
471
 
472
  # W&B
473
  try:
474
  import wandb
475
 
476
  assert os.environ.get('WANDB_DISABLED') != 'true'
 
477
  except (ImportError, AssertionError):
478
  opt.log_imgs = 0
479
  logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
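The updated resume logic derives the run directory purely from the checkpoint path, two levels up from the weights file, so `opt.yaml` can be reloaded from the same run. A minimal sketch with a hypothetical checkpoint path:

```python
from pathlib import Path

ckpt = 'runs/train/exp0/weights/last.pt'  # hypothetical checkpoint to resume
log_dir = Path(ckpt).parent.parent  # weights/ -> exp0/, i.e. runs/train/exp0
print(log_dir)  # runs/train/exp0
# train.py then reloads the saved options: with open(log_dir / 'opt.yaml') as f: ...
```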
tutorial.ipynb CHANGED
@@ -596,22 +596,22 @@
596
  }
597
  },
598
  "source": [
599
- "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source inference/images/\n",
600
- "Image(filename='inference/output/zidane.jpg', width=600)"
601
  ],
602
  "execution_count": null,
603
  "outputs": [
604
  {
605
  "output_type": "stream",
606
  "text": [
607
- "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='inference/output', save_txt=False, source='inference/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
608
  "Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16130MB)\n",
609
  "\n",
610
  "Fusing layers... \n",
611
  "Model Summary: 140 layers, 7.45958e+06 parameters, 0 gradients\n",
612
- "image 1/2 /content/yolov5/inference/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.012s)\n",
613
- "image 2/2 /content/yolov5/inference/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s)\n",
614
- "Results saved to inference/output\n",
615
  "Done. (0.113s)\n"
616
  ],
617
  "name": "stdout"
@@ -640,7 +640,7 @@
640
  "id": "4qbaa3iEcrcE"
641
  },
642
  "source": [
643
- "Results are saved to `inference/output`. A full list of available inference sources:\n",
644
  "<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
645
  ]
646
  },
@@ -887,7 +887,7 @@
887
  "source": [
888
  "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with dataset `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n",
889
  "\n",
890
- "All training results are saved to `runs/exp0` for the first experiment, then `runs/exp1`, `runs/exp2` etc. for subsequent experiments.\n"
891
  ]
892
  },
893
  {
@@ -969,7 +969,7 @@
969
  "Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
970
  "Image sizes 640 train, 640 test\n",
971
  "Using 2 dataloader workers\n",
972
- "Logging results to runs/exp0\n",
973
  "Starting training for 3 epochs...\n",
974
  "\n",
975
  " Epoch gpu_mem box obj cls total targets img_size\n",
@@ -986,8 +986,8 @@
986
  " 2/2 3.17G 0.04445 0.06545 0.01666 0.1266 149 640: 100% 8/8 [00:01<00:00, 4.33it/s]\n",
987
  " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:02<00:00, 2.78it/s]\n",
988
  " all 128 929 0.395 0.766 0.701 0.455\n",
989
- "Optimizer stripped from runs/exp0/weights/last.pt, 15.2MB\n",
990
- "Optimizer stripped from runs/exp0/weights/best.pt, 15.2MB\n",
991
  "3 epochs completed in 0.005 hours.\n",
992
  "\n"
993
  ],
@@ -1030,7 +1030,7 @@
1030
  "source": [
1031
  "## Local Logging\n",
1032
  "\n",
1033
- "All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View train and test jpgs to see mosaics, labels/predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
1034
  ]
1035
  },
1036
  {
@@ -1039,9 +1039,9 @@
1039
  "id": "riPdhraOTCO0"
1040
  },
1041
  "source": [
1042
- "Image(filename='runs/exp0/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n",
1043
- "Image(filename='runs/exp0/test_batch0_gt.jpg', width=800) # test batch 0 ground truth\n",
1044
- "Image(filename='runs/exp0/test_batch0_pred.jpg', width=800) # test batch 0 predictions"
1045
  ],
1046
  "execution_count": null,
1047
  "outputs": []
@@ -1078,7 +1078,7 @@
1078
  },
1079
  "source": [
1080
  "from utils.utils import plot_results \n",
1081
- "plot_results(save_dir='runs/exp0') # plot results.txt as results.png\n",
1082
  "Image(filename='results.png', width=800) "
1083
  ],
1084
  "execution_count": null,
@@ -1170,9 +1170,9 @@
1170
  " for di in 0 cpu # inference devices\n",
1171
  " do\n",
1172
  " python detect.py --weights $x.pt --device $di # detect official\n",
1173
- " python detect.py --weights runs/exp0/weights/last.pt --device $di # detect custom\n",
1174
  " python test.py --weights $x.pt --device $di # test official\n",
1175
- " python test.py --weights runs/exp0/weights/last.pt --device $di # test custom\n",
1176
  " done\n",
1177
  " python models/yolo.py --cfg $x.yaml # inspect\n",
1178
  " python models/export.py --weights $x.pt --img 640 --batch 1 # export\n",
 
596
  }
597
  },
598
  "source": [
599
+ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n",
600
+ "Image(filename='runs/detect/exp0/zidane.jpg', width=600)"
601
  ],
602
  "execution_count": null,
603
  "outputs": [
604
  {
605
  "output_type": "stream",
606
  "text": [
607
+ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
608
  "Using CUDA device0 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', total_memory=16130MB)\n",
609
  "\n",
610
  "Fusing layers... \n",
611
  "Model Summary: 140 layers, 7.45958e+06 parameters, 0 gradients\n",
612
+ "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.012s)\n",
613
+ "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s)\n",
614
+ "Results saved to runs/detect/exp0\n",
615
  "Done. (0.113s)\n"
616
  ],
617
  "name": "stdout"
 
640
  "id": "4qbaa3iEcrcE"
641
  },
642
  "source": [
643
+ "Results are saved to `runs/detect`. A full list of available inference sources:\n",
644
  "<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> "
645
  ]
646
  },
 
887
  "source": [
888
  "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with dataset `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n",
889
  "\n",
890
+ "All training results are saved to `runs/train/exp0` for the first experiment, then `runs/exp1`, `runs/exp2` etc. for subsequent experiments.\n"
891
  ]
892
  },
893
  {
 
969
  "Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
970
  "Image sizes 640 train, 640 test\n",
971
  "Using 2 dataloader workers\n",
972
+ "Logging results to runs/train/exp0\n",
973
  "Starting training for 3 epochs...\n",
974
  "\n",
975
  " Epoch gpu_mem box obj cls total targets img_size\n",
 
986
  " 2/2 3.17G 0.04445 0.06545 0.01666 0.1266 149 640: 100% 8/8 [00:01<00:00, 4.33it/s]\n",
987
  " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:02<00:00, 2.78it/s]\n",
988
  " all 128 929 0.395 0.766 0.701 0.455\n",
989
+ "Optimizer stripped from runs/train/exp0/weights/last.pt, 15.2MB\n",
990
+ "Optimizer stripped from runs/train/exp0/weights/best.pt, 15.2MB\n",
991
  "3 epochs completed in 0.005 hours.\n",
992
  "\n"
993
  ],
 
1030
  "source": [
1031
  "## Local Logging\n",
1032
  "\n",
1033
+ "All results are logged by default to the `runs/train/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View train and test jpgs to see mosaics, labels/predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
1034
  ]
1035
  },
1036
  {
 
1039
  "id": "riPdhraOTCO0"
1040
  },
1041
  "source": [
1042
+ "Image(filename='runs/train/exp0/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n",
1043
+ "Image(filename='runs/train/exp0/test_batch0_gt.jpg', width=800) # test batch 0 ground truth\n",
1044
+ "Image(filename='runs/train/exp0/test_batch0_pred.jpg', width=800) # test batch 0 predictions"
1045
  ],
1046
  "execution_count": null,
1047
  "outputs": []
 
1078
  },
1079
  "source": [
1080
  "from utils.utils import plot_results \n",
1081
+ "plot_results(save_dir='runs/train/exp0') # plot results.txt as results.png\n",
1082
  "Image(filename='results.png', width=800) "
1083
  ],
1084
  "execution_count": null,
 
1170
  " for di in 0 cpu # inference devices\n",
1171
  " do\n",
1172
  " python detect.py --weights $x.pt --device $di # detect official\n",
1173
+ " python detect.py --weights runs/train/exp0/weights/last.pt --device $di # detect custom\n",
1174
  " python test.py --weights $x.pt --device $di # test official\n",
1175
+ " python test.py --weights runs/train/exp0/weights/last.pt --device $di # test custom\n",
1176
  " done\n",
1177
  " python models/yolo.py --cfg $x.yaml # inspect\n",
1178
  " python models/export.py --weights $x.pt --img 640 --batch 1 # export\n",
utils/general.py CHANGED
@@ -955,9 +955,15 @@ def increment_dir(dir, comment=''):
955
  # Increments a directory runs/exp1 --> runs/exp2_comment
956
  n = 0 # number
957
  dir = str(Path(dir)) # os-agnostic
958
  dirs = sorted(glob.glob(dir + '*')) # directories
959
  if dirs:
960
- matches = [re.search(r"exp(\d+)", d) for d in dirs]
961
  idxs = [int(m.groups()[0]) for m in matches if m]
962
  if idxs:
963
  n = max(idxs) + 1 # increment
@@ -1262,7 +1268,7 @@ def plot_results_overlay(start=0, stop=0): # from utils.general import *; plot_
1262
 
1263
 
1264
  def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
1265
- # from utils.general import *; plot_results(save_dir='runs/exp0')
1266
  # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
1267
  fig, ax = plt.subplots(2, 5, figsize=(12, 6))
1268
  ax = ax.ravel()
 
955
  # Increments a directory runs/exp1 --> runs/exp2_comment
956
  n = 0 # number
957
  dir = str(Path(dir)) # os-agnostic
958
+ if os.path.isdir(dir):
959
+ stem = ''
960
+ dir += os.sep # removed by Path
961
+ else:
962
+ stem = Path(dir).stem
963
+
964
  dirs = sorted(glob.glob(dir + '*')) # directories
965
  if dirs:
966
+ matches = [re.search(r"%s(\d+)" % stem, d) for d in dirs]
967
  idxs = [int(m.groups()[0]) for m in matches if m]
968
  if idxs:
969
  n = max(idxs) + 1 # increment
 
1268
 
1269
 
1270
  def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
1271
+ # from utils.general import *; plot_results(save_dir='runs/train/exp0')
1272
  # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
1273
  fig, ax = plt.subplots(2, 5, figsize=(12, 6))
1274
  ax = ax.ravel()
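Assembled from the hunks above, the updated `increment_dir` keys its regex on the path's stem (`exp` for `runs/train/exp`) instead of a hardcoded `exp`, so any prefix increments correctly. The return line is not shown in the hunk; the tail below is inferred from the function's own comment (`runs/exp1 --> runs/exp2_comment`), so treat it as an assumption:

```python
import glob
import os
import re
from pathlib import Path


def increment_dir(dir, comment=''):
    # Increments a directory runs/exp1 --> runs/exp2_comment
    n = 0  # number
    dir = str(Path(dir))  # os-agnostic
    if os.path.isdir(dir):
        stem = ''
        dir += os.sep  # removed by Path
    else:
        stem = Path(dir).stem  # e.g. 'exp'
    dirs = sorted(glob.glob(dir + '*'))  # existing directories
    if dirs:
        matches = [re.search(r"%s(\d+)" % stem, d) for d in dirs]
        idxs = [int(m.groups()[0]) for m in matches if m]
        if idxs:
            n = max(idxs) + 1  # increment
    return dir + str(n) + ('_' + comment if comment else '')  # inferred tail, not in the hunk


# e.g. with runs/train/exp0 and runs/train/exp1 already on disk:
#   increment_dir(Path('runs/train') / 'exp')       -> 'runs/train/exp2'
#   increment_dir(Path('runs/train') / 'exp', 'x')  -> 'runs/train/exp2_x'
```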