Commit 4cac1cf (1 parent: f62609e)
Authors: Ayush Chaurasia, pre-commit-ci[bot], glenn-jocher

Add `--noplots` flag to suppress figures and images logging (#7534)


* support nomedia

* support nomedia for validation

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update train.py

* Revert no plot evolve

evolve plots do not contain any images

* Revert plot_results

contains no media

* Update wandb_utils.py

* sync-bn cleanup

* Cleanup

* Rename nomedia -> noplots

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
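
In practice, a run such as `python train.py --noplots` (alongside the usual dataset and weights arguments) trains without writing plot images, and figure/image logging to TensorBoard and W&B is suppressed as well; `--evolve` runs continue to skip plots automatically, as before.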

train.py CHANGED
@@ -100,7 +100,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
             callbacks.register_action(k, callback=getattr(loggers, k))
 
     # Config
-    plots = not evolve  # create plots
+    plots = not evolve and not opt.noplots  # create plots
     cuda = device.type != 'cpu'
     init_seeds(1 + RANK)
     with torch_distributed_zero_first(LOCAL_RANK):
@@ -373,7 +373,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                 mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                 pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
                                      (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
-                callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)
+                callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots)
                 if callbacks.stop_training:
                     return
             # end batch ------------------------------------------------------------------------------------------------
@@ -488,6 +488,7 @@ def parse_opt(known=False):
     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
     parser.add_argument('--noval', action='store_true', help='only validate final epoch')
     parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+    parser.add_argument('--noplots', action='store_true', help='save no plot files')
     parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
     parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
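
As a minimal standalone sketch (not the actual train.py, argument values illustrative), the new flag simply folds into a single `plots` boolean that the logging callbacks receive:

```python
# Minimal standalone sketch (not the actual train.py): --noplots folds into a
# single `plots` boolean that is forwarded to the logging callbacks.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
parser.add_argument('--noplots', action='store_true', help='save no plot files')
opt = parser.parse_args([])  # empty argv here; pass e.g. ['--noplots'] to disable plots

plots = not opt.evolve and not opt.noplots  # create plots unless evolving or --noplots is set
print(plots)  # True with defaults, False with --noplots or --evolve
```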
utils/loggers/__init__.py CHANGED
@@ -99,11 +99,11 @@ class Loggers():
         if self.wandb:
             self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
 
-    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
+    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):
         # Callback runs on train batch end
         if plots:
             if ni == 0:
-                if not sync_bn:  # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
+                if not self.opt.sync_bn:  # --sync known issue https://github.com/ultralytics/yolov5/issues/3754
                     with warnings.catch_warnings():
                         warnings.simplefilter('ignore')  # suppress jit trace warning
                         self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
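
Dropping the `sync_bn` parameter works because the `Loggers` instance already holds the parsed options and can read `self.opt.sync_bn` directly. A hypothetical `MiniLogger` sketch of the same pattern (illustration only, not the repo class):

```python
# Hypothetical MiniLogger (illustration only): options are stored once at
# construction, so per-batch callbacks need fewer arguments.
from types import SimpleNamespace


class MiniLogger:
    def __init__(self, opt):
        self.opt = opt  # parsed training options kept on the logger

    def on_train_batch_end(self, ni, plots):
        # Log the model graph only on the first batch, and only when SyncBatchNorm is off
        if plots and ni == 0 and not self.opt.sync_bn:
            print('would trace the model and add its graph to TensorBoard here')


opt = SimpleNamespace(sync_bn=False, noplots=False)
logger = MiniLogger(opt)
logger.on_train_batch_end(ni=0, plots=not opt.noplots)  # prints the placeholder message
```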
utils/loggers/wandb/wandb_utils.py CHANGED
@@ -250,8 +250,8 @@ class WandbLogger():
             self.map_val_table_path()
         if opt.bbox_interval == -1:
             self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
-            if opt.evolve:
-                self.bbox_interval = opt.bbox_interval = opt.epochs + 1
+            if opt.evolve or opt.noplots:
+                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval
         train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
         # Update the the data_dict to point to local artifacts dir
         if train_from_artifact:
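
Pushing `bbox_interval` past the last epoch is what the `# disable bbox_interval` comment refers to: the interval is never reached, so no bounding-box media are logged. A small sketch with illustrative values:

```python
# Illustrative values only: bbox media logging is disabled by pushing the
# interval past the last epoch index, so the periodic check can never fire.
epochs = 30
evolve, noplots = False, True

bbox_interval = (epochs // 10) if epochs > 10 else 1  # default: roughly 10 media logs per run
if evolve or noplots:
    bbox_interval = epochs + 1  # no epoch index reaches this value

print(bbox_interval)  # 31
```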