glenn-jocher committed
Commit 96e36a7 • Parent: 3764277

New CSV Logger (#4148)

* New CSV Logger
* cleanup
* move batch plots into Logger
* rename comment
* Remove total loss from progress bar
* mloss :-1 bug fix
* Update plot_results()
* Update plot_results()
* plot_results bug fix
- .gitignore +1 -0
- train.py +11 -29
- utils/loggers/__init__.py +38 -25
- utils/loss.py +1 -2
- utils/plots.py +16 -52
- val.py +1 -1
.gitignore
CHANGED

@@ -31,6 +31,7 @@ data/*
 !data/*.sh
 
 results*.txt
+results*.csv
 
 # Datasets -------------------------------------------------------------------------------------------------------------
 coco/
train.py
CHANGED

@@ -12,7 +12,6 @@ import sys
 import time
 from copy import deepcopy
 from pathlib import Path
-from threading import Thread
 
 import math
 import numpy as np
@@ -38,7 +37,7 @@ from utils.general import labels_to_class_weights, increment_path, labels_to_ima
     check_requirements, print_mutation, set_logging, one_cycle, colorstr
 from utils.google_utils import attempt_download
 from utils.loss import ComputeLoss
-from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
+from utils.plots import plot_labels, plot_evolution
 from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
 from utils.loggers.wandb.wandb_utils import check_wandb_resume
 from utils.metrics import fitness
@@ -61,7 +60,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # Directories
     w = save_dir / 'weights'  # weights dir
     w.mkdir(parents=True, exist_ok=True)  # make dir
-    last, best, results_file = w / 'last.pt', w / 'best.pt', save_dir / 'results.txt'
+    last, best = w / 'last.pt', w / 'best.pt'
 
     # Hyperparameters
     if isinstance(hyp, str):
@@ -88,7 +87,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
 
     # Loggers
     if RANK in [-1, 0]:
-        loggers = Loggers(save_dir, results_file, weights, opt, hyp, data_dict, LOGGER).start()  # loggers dict
+        loggers = Loggers(save_dir, weights, opt, hyp, data_dict, LOGGER).start()  # loggers dict
         if loggers.wandb and resume:
             weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict
 
@@ -167,10 +166,6 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
             ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
             ema.updates = ckpt['updates']
 
-        # Results
-        if ckpt.get('training_results') is not None:
-            results_file.write_text(ckpt['training_results'])  # write results.txt
-
         # Epochs
         start_epoch = ckpt['epoch'] + 1
         if resume:
@@ -275,11 +270,11 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
         # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders
 
-        mloss = torch.zeros(4, device=device)  # mean losses
+        mloss = torch.zeros(3, device=device)  # mean losses
         if RANK != -1:
             train_loader.sampler.set_epoch(epoch)
         pbar = enumerate(train_loader)
-        LOGGER.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
+        LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
             pbar = tqdm(pbar, total=nb)  # progress bar
         optimizer.zero_grad()
@@ -327,20 +322,13 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
                     ema.update(model)
                 last_opt_step = ni
 
-            # Print
+            # Log
             if RANK in [-1, 0]:
                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                 mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
-                s = ('%10s' * 2 + '%10.4g' * 6) % (
-                    f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])
-                pbar.set_description(s)
-
-                # Plot
-                if plots:
-                    if ni < 3:
-                        f = save_dir / f'train_batch{ni}.jpg'  # filename
-                        Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
-                    loggers.on_train_batch_end(ni, model, imgs)
+                pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % (
+                    f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
+                loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots)
 
             # end batch ------------------------------------------------------------------------------------------------
 
@@ -371,13 +359,12 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
             fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
             if fi > best_fitness:
                 best_fitness = fi
-            loggers.on_train_val_end(mloss, results, lr, epoch, s, best_fitness, fi)
+            loggers.on_train_val_end(mloss, results, lr, epoch, best_fitness, fi)
 
             # Save model
             if (not nosave) or (final_epoch and not evolve):  # if save
                 ckpt = {'epoch': epoch,
                         'best_fitness': best_fitness,
-                        'training_results': results_file.read_text(),
                         'model': deepcopy(de_parallel(model)).half(),
                         'ema': deepcopy(ema.ema).half(),
                         'updates': ema.updates,
@@ -395,9 +382,6 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # end training -----------------------------------------------------------------------------------------------------
     if RANK in [-1, 0]:
         LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
-        if plots:
-            plot_results(save_dir=save_dir)  # save as results.png
-
         if not evolve:
             if is_coco:  # COCO dataset
                 for m in [last, best] if best.exists() else [last]:  # speed, mAP tests
@@ -411,13 +395,11 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
                                             save_dir=save_dir,
                                             save_json=True,
                                             plots=False)
-
         # Strip optimizers
         for f in last, best:
             if f.exists():
                 strip_optimizer(f)  # strip optimizers
-
-        loggers.on_train_end(last, best)
+        loggers.on_train_end(last, best, plots)
 
     torch.cuda.empty_cache()
     return results
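For reference, a minimal sketch of the new 7-column progress bar: with the 'total' loss column removed, the row format drops from six numeric fields to five. The epoch string, memory figure, and loss values below are invented for illustration:

# Reproduces the header and row format strings now used in train.py, with made-up values
mloss = [0.0461, 0.0652, 0.0183]  # running means of box, obj, cls losses
print(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
print(('%10s' * 2 + '%10.4g' * 5) % ('0/299', '3.05G', *mloss, 22, 640))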
utils/loggers/__init__.py
CHANGED

@@ -1,15 +1,17 @@
 # YOLOv5 experiment logging utils
 
 import warnings
+from threading import Thread
 
 import torch
 from torch.utils.tensorboard import SummaryWriter
 
 from utils.general import colorstr, emojis
 from utils.loggers.wandb.wandb_utils import WandbLogger
+from utils.plots import plot_images, plot_results
 from utils.torch_utils import de_parallel
 
-LOGGERS = ('txt', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
+LOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
 
 try:
     import wandb
@@ -21,10 +23,8 @@ except (ImportError, AssertionError):
 
 class Loggers():
     # YOLOv5 Loggers class
-    def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp=None,
-                 data_dict=None, logger=None, include=LOGGERS):
+    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, data_dict=None, logger=None, include=LOGGERS):
         self.save_dir = save_dir
-        self.results_file = results_file
         self.weights = weights
         self.opt = opt
         self.hyp = hyp
@@ -35,7 +35,7 @@ class Loggers():
             setattr(self, k, None)  # init empty logger dictionary
 
     def start(self):
-        self.txt = True  # always log to txt
+        self.csv = True  # always log to csv
 
         # Message
         try:
@@ -63,15 +63,19 @@ class Loggers():
 
         return self
 
-    def on_train_batch_end(self, ni, model, imgs):
+    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):
         # Callback runs on train batch end
-        if ni == 0:
-            with warnings.catch_warnings():
-                warnings.simplefilter('ignore')  # suppress jit trace warning
-                self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
-        if self.wandb and ni == 10:
-            files = sorted(self.save_dir.glob('train*.jpg'))
-            self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
+        if plots:
+            if ni == 0:
+                with warnings.catch_warnings():
+                    warnings.simplefilter('ignore')  # suppress jit trace warning
+                    self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
+            if ni < 3:
+                f = self.save_dir / f'train_batch{ni}.jpg'  # filename
+                Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
+            if self.wandb and ni == 10:
+                files = sorted(self.save_dir.glob('train*.jpg'))
+                self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
 
     def on_train_epoch_end(self, epoch):
         # Callback runs on train epoch end
@@ -89,21 +93,28 @@ class Loggers():
             files = sorted(self.save_dir.glob('val*.jpg'))
             self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
 
-    def on_train_val_end(self, mloss, results, lr, epoch, s, best_fitness, fi):
-        # Callback runs on validation end during training
-        vals = list(mloss[:-1]) + list(results) + lr
-        tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
-                'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+    def on_train_val_end(self, mloss, results, lr, epoch, best_fitness, fi):
+        # Callback runs on val end during training
+        vals = list(mloss) + list(results) + lr
+        keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
+                'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',  # metrics
                 'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                 'x/lr0', 'x/lr1', 'x/lr2']  # params
-        if self.txt:
-            with open(self.results_file, 'a') as f:
-                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
+        x = {k: v for k, v in zip(keys, vals)}  # dict
+
+        if self.csv:
+            file = self.save_dir / 'results.csv'
+            n = len(x) + 1  # number of cols
+            s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # add header
+            with open(file, 'a') as f:
+                f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
+
         if self.tb:
-            for x, tag in zip(vals, tags):
-                self.tb.add_scalar(tag, x, epoch)  # TensorBoard
+            for k, v in x.items():
+                self.tb.add_scalar(k, v, epoch)  # TensorBoard
+
         if self.wandb:
-            self.wandb.log({tag: x for tag, x in zip(tags, vals)})
+            self.wandb.log(x)
             self.wandb.end_epoch(best_result=best_fitness == fi)
 
     def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
@@ -112,8 +123,10 @@ class Loggers():
         if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
             self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
 
-    def on_train_end(self, last, best):
+    def on_train_end(self, last, best, plots):
         # Callback runs on training end
+        if plots:
+            plot_results(dir=self.save_dir)  # save results.png
         files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
         files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
         if self.wandb:
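A standalone sketch of the header-on-first-write CSV append that on_train_val_end now performs. The run directory and the shortened key list here are hypothetical; the real call uses the 13 keys above plus the leading epoch column:

from pathlib import Path

save_dir = Path('runs/train/exp')  # hypothetical run directory
save_dir.mkdir(parents=True, exist_ok=True)
keys = ['train/box_loss', 'metrics/mAP_0.5']  # shortened key list for the sketch
vals, epoch = [0.0461, 0.712], 0

file = save_dir / 'results.csv'
n = len(keys) + 1  # number of cols, epoch included
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # header written once
with open(file, 'a') as f:
    f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')

Each call appends one row, so results.csv accumulates one line per epoch and the header is emitted only when the file does not yet exist.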
utils/loss.py
CHANGED

@@ -162,8 +162,7 @@ class ComputeLoss:
         lcls *= self.hyp['cls']
         bs = tobj.shape[0]  # batch size
 
-        loss = lbox + lobj + lcls
-        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
 
     def build_targets(self, p, targets):
         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
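The changed return contract in isolation, with dummy scalars standing in for the real loss tensors: the total is formed inline and the detached vector now carries only the three components, which is what lets train.py shrink mloss to three elements and val.py drop its slicing:

import torch

lbox, lobj, lcls = torch.tensor([0.05]), torch.tensor([0.07]), torch.tensor([0.02])
bs = 16  # batch size
total, items = (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
print(total.item())    # ~2.24, the scalar that is backpropagated
print(items.tolist())  # ~[0.05, 0.07, 0.02], the per-component values that get logged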
utils/plots.py
CHANGED

@@ -1,7 +1,5 @@
 # Plotting utils
 
-import glob
-import os
 from copy import copy
 from pathlib import Path
 
@@ -387,63 +385,29 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
     plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
 
 
-def plot_results_overlay(start=0, stop=0):  # from utils.plots import *; plot_results_overlay()
-    # Plot training 'results*.txt', overlaying train and val losses
-    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
-    t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
-    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
-        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
-        n = results.shape[1]  # number of rows
-        x = range(start, min(stop, n) if stop else n)
-        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
-        ax = ax.ravel()
-        for i in range(5):
-            for j in [i, i + 5]:
-                y = results[j, x]
-                ax[i].plot(x, y, marker='.', label=s[j])
-                # y_smooth = butter_lowpass_filtfilt(y)
-                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
-
-            ax[i].set_title(t[i])
-            ax[i].legend()
-            ax[i].set_ylabel(f) if i == 0 else None  # add filename
-        fig.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
-    # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
+def plot_results(file='', dir=''):
+    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
+    save_dir = Path(file).parent if file else Path(dir)
     fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
     ax = ax.ravel()
-    s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
-         'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
-    if bucket:
-        # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
-        files = ['results%g.txt' % x for x in id]
-        c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
-        os.system(c)
-    else:
-        files = list(Path(save_dir).glob('results*.txt'))
-    assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
+    files = list(save_dir.glob('results*.csv'))
+    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
     for fi, f in enumerate(files):
         try:
-            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
-            n = results.shape[1]  # number of rows
-            x = range(start, min(stop, n) if stop else n)
-            for i in range(10):
-                y = results[i, x]
-                if i in [0, 1, 2, 5, 6, 7]:
-                    y[y == 0] = np.nan  # don't show zero loss values
-                    # y /= y[0]  # normalize
-                label = labels[fi] if len(labels) else f.stem
-                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
-                ax[i].set_title(s[i])
-                # if i in [5, 6, 7]:  # share train and val loss y axes
+            data = pd.read_csv(f)
+            s = [x.strip() for x in data.columns]
+            x = data.values[:, 0]
+            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
+                y = data.values[:, j]
+                # y[y == 0] = np.nan  # don't show zero values
+                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
+                ax[i].set_title(s[j], fontsize=12)
+                # if j in [8, 9, 10]:  # share train and val loss y axes
                 #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
         except Exception as e:
-            print('Warning: Plotting error for %s; %s' % (f, e))
-
+            print(f'Warning: Plotting error for {f}: {e}')
     ax[1].legend()
-    fig.savefig(Path(save_dir) / 'results.png', dpi=200)
+    fig.savefig(save_dir / 'results.png', dpi=200)
 
 
 def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
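Because results are now CSV, they are also easy to consume outside plot_results. A short sketch assuming a finished run at the hypothetical path below; the '%20s,'-padded headers need stripping, exactly as plot_results does with x.strip():

import pandas as pd

data = pd.read_csv('runs/train/exp/results.csv')  # hypothetical run directory
data.columns = [c.strip() for c in data.columns]  # headers are space-padded by the CSV writer
print(data[['epoch', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95']].tail())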
val.py
CHANGED

@@ -171,7 +171,7 @@ def run(data,
 
         # Compute loss
         if compute_loss:
-            loss += compute_loss([x.float() for x in train_out], targets)[1][:3]  # box, obj, cls
+            loss += compute_loss([x.float() for x in train_out], targets)[1]  # box, obj, cls
 
         # Run NMS
         targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
|