glenn-jocher committed
Commit e5b0200
Parent(s): 2b329b0
Update tensorboard>=2.4.1 (#2576)
* Update tensorboard>=2.4.1

Update the tensorboard version to attempt to address https://github.com/ultralytics/yolov5/issues/2573 (TensorBoard logging failure in the Docker image).

* cleanup
- requirements.txt +1 -1
- train.py +5 -5
requirements.txt
CHANGED
@@ -8,12 +8,12 @@ opencv-python>=4.1.2
 Pillow
 PyYAML>=5.3.1
 scipy>=1.4.1
-tensorboard>=2.2
 torch>=1.7.0
 torchvision>=0.8.1
 tqdm>=4.41.0
 
 # logging -------------------------------------
+tensorboard>=2.4.1
 # wandb
 
 # plotting ------------------------------------
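Since the floor is now tensorboard>=2.4.1, a quick way to confirm that an environment (for example, the Docker image from issue #2573) satisfies the new pin is sketched below. This is a minimal illustration using pkg_resources (which ships with setuptools), not yolov5's own check_requirements() helper.

# Minimal sketch: verify the installed tensorboard meets the new ">=2.4.1" pin.
# Uses pkg_resources from setuptools; this is not yolov5's check_requirements().
import pkg_resources

requirement = 'tensorboard>=2.4.1'
try:
    pkg_resources.require(requirement)  # raises if the package is missing or too old
    print(f'{requirement} is satisfied')
except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict) as e:
    print(f'{requirement} is NOT satisfied: {e}')

If the check fails, upgrading to any tensorboard release at or above 2.4.1 brings the environment in line with the updated requirements.txt.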
train.py
CHANGED
@@ -1,4 +1,3 @@
-
 import argparse
 import logging
 import math
@@ -34,7 +33,7 @@ from utils.google_utils import attempt_download
 from utils.loss import ComputeLoss
 from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
 from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
-from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id
+from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id
 
 logger = logging.getLogger(__name__)
 
@@ -75,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None):
     data_dict = wandb_logger.data_dict
     if wandb_logger.wandb:
         weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming
-
+
     nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
     names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
     assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
@@ -405,7 +404,7 @@ def train(hyp, opt, device, tb_writer=None):
                    wandb_logger.log_model(
                        last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                del ckpt
-
+
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
@@ -534,7 +533,8 @@ if __name__ == '__main__':
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
-
+            prefix = colorstr('tensorboard: ')
+            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer)
 
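For context, the functional part of the train.py change is the TensorBoard startup hint plus writer creation. The sketch below reproduces that pattern as a standalone script; it is an illustration, not the repository's code: colorstr() (a yolov5 terminal-color helper) is replaced by a plain string, and the paths are assumed stand-ins for opt.save_dir and opt.project.

# Standalone sketch of the logging pattern added to train.py (illustration only).
# colorstr() from yolov5 is replaced by a plain prefix string; the paths below
# are assumed stand-ins for opt.save_dir and opt.project.
import logging
from torch.utils.tensorboard import SummaryWriter  # needs tensorboard installed

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

save_dir = 'runs/train/exp'  # stand-in for opt.save_dir
project = 'runs/train'       # stand-in for opt.project

prefix = 'tensorboard: '     # train.py wraps this with colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {project}', view at http://localhost:6006/")

tb_writer = SummaryWriter(save_dir)                     # event files land under save_dir
tb_writer.add_scalar('train/loss', 0.5, global_step=0)  # dummy scalar for illustration
tb_writer.close()

In train.py itself the writer is only created when opt.global_rank is -1 or 0, so in distributed training only the master process writes TensorBoard events.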