glenn-jocher committed
Commit 9c91aea · unverified · 1 parent: b7007d0

W&B logging add hyperparameters (#1399)

* W&B logging add hyperparameters

* hyp bug fix and image logging updates

* if plots and wandb:

* cleanup

* wandb/ gitignore add

* cleanup 2

* cleanup 3

* move wandb import to top of file

* wandb evolve

* update import

* wandb.run.finish()

* default anchors: 3

Files changed (5)
  1. .gitignore +2 -0
  2. data/hyp.scratch.yaml +1 -1
  3. test.py +16 -14
  4. train.py +37 -29
  5. utils/plots.py +4 -4
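
Taken together, the train.py diff below follows a familiar optional-dependency pattern for Weights & Biases: import wandb once at module level and fall back to None when it is not installed, attach the hyperparameter dict to the run config via wandb.init(config=...), and close the run explicitly with wandb.run.finish(). A minimal standalone sketch of that lifecycle (the project name, hyperparameter values, and placeholder loop are illustrative, not taken from the repository):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    import wandb  # Weights & Biases is an optional dependency
except ImportError:
    wandb = None
    logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")


def train(hyp, config):
    # Hyperparameters are logged by folding them into the run config (the commit does opt.hyp = hyp)
    if wandb and wandb.run is None:
        config['hyp'] = hyp  # add hyperparameters
        wandb.init(project='YOLOv5-demo', config=config, resume="allow")  # 'YOLOv5-demo' is illustrative

    for epoch in range(config['epochs']):  # placeholder training loop
        if wandb:
            wandb.log({'epoch': epoch})

    wandb.run.finish() if wandb and wandb.run else None  # close the run explicitly


if __name__ == '__main__':
    train(hyp={'lr0': 0.01, 'anchors': 3}, config={'epochs': 3, 'batch_size': 16})
```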
.gitignore CHANGED
@@ -79,9 +79,11 @@ sdist/
 var/
 wheels/
 *.egg-info/
+wandb/
 .installed.cfg
 *.egg
 
+
 # PyInstaller
 # Usually these files are written by a python script from a template
 # before PyInstaller builds the exe, so as to inject date/other infos into it.
data/hyp.scratch.yaml CHANGED
@@ -17,7 +17,7 @@ obj: 1.0  # obj loss gain (scale with pixels)
 obj_pw: 1.0  # obj BCELoss positive_weight
 iou_t: 0.20  # IoU training threshold
 anchor_t: 4.0  # anchor-multiple threshold
-# anchors: 0  # anchors per output grid (0 to ignore)
+# anchors: 3  # anchors per output layer (0 to ignore)
 fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
 hsv_h: 0.015  # image HSV-Hue augmentation (fraction)
 hsv_s: 0.7  # image HSV-Saturation augmentation (fraction)
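
The only change here swaps the commented-out default from anchors: 0 to anchors: 3, i.e. three anchors per output layer once the key is uncommented. This file is plain YAML that train.py loads into a dict, so the optional key can be read with a simple .get(); a short sketch assuming the repository layout (the fallback comment describes assumed intent, not the repo's exact autoanchor logic):

```python
import yaml

# Load the hyperparameter file changed in this commit
with open('data/hyp.scratch.yaml') as f:
    hyp = yaml.safe_load(f)  # dict mapping hyperparameter name -> value

# 'anchors' ships commented out, so .get() returns None and the anchor count
# stays whatever the model YAML defines (assumed fallback for this sketch).
anchors_per_layer = hyp.get('anchors')  # would be 3 once the line is uncommented
print(f"anchor_t={hyp['anchor_t']}, anchors={anchors_per_layer}")
```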
test.py CHANGED
@@ -75,7 +75,7 @@ def test(data,
     niou = iouv.numel()
 
     # Logging
-    log_imgs = min(log_imgs, 100)  # ceil
+    log_imgs, wandb = min(log_imgs, 100), None  # ceil
     try:
         import wandb  # Weights & Biases
     except ImportError:
@@ -132,6 +132,7 @@
                 continue
 
             # Append to text file
+            path = Path(paths[si])
             if save_txt:
                 gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                 x = pred.clone()
@@ -139,18 +140,18 @@
                 for *xyxy, conf, cls in x:
                     xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                     line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
-                    with open(str(save_dir / 'labels' / Path(paths[si]).stem) + '.txt', 'a') as f:
+                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                         f.write(('%g ' * len(line)).rstrip() % line + '\n')
 
             # W&B logging
-            if len(wandb_images) < log_imgs:
+            if plots and len(wandb_images) < log_imgs:
                 box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                              "class_id": int(cls),
                              "box_caption": "%s %.3f" % (names[cls], conf),
                              "scores": {"class_score": conf},
-                             "domain": "pixel"} for *xyxy, conf, cls in pred.clone().tolist()]
+                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                 boxes = {"predictions": {"box_data": box_data, "class_labels": names}}
-                wandb_images.append(wandb.Image(img[si], boxes=boxes))
+                wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
 
             # Clip boxes to image bounds
             clip_coords(pred, (height, width))
@@ -158,13 +159,13 @@
             # Append to pycocotools JSON dictionary
             if save_json:
                 # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
-                image_id = Path(paths[si]).stem
+                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                 box = pred[:, :4].clone()  # xyxy
                 scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape
                 box = xyxy2xywh(box)  # xywh
                 box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                 for p, b in zip(pred.tolist(), box.tolist()):
-                    jdict.append({'image_id': int(image_id) if image_id.isnumeric() else image_id,
+                    jdict.append({'image_id': image_id,
                                   'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                   'bbox': [round(x, 3) for x in b],
                                   'score': round(p[4], 5)})
@@ -203,15 +204,11 @@
             stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
 
         # Plot images
-        if plots and batch_i < 1:
+        if plots and batch_i < 3:
             f = save_dir / f'test_batch{batch_i}_labels.jpg'  # filename
-            plot_images(img, targets, paths, str(f), names)  # labels
+            plot_images(img, targets, paths, f, names)  # labels
             f = save_dir / f'test_batch{batch_i}_pred.jpg'
-            plot_images(img, output_to_target(output, width, height), paths, str(f), names)  # predictions
-
-            # W&B logging
-            if wandb_images:
-                wandb.log({"outputs": wandb_images})
+            plot_images(img, output_to_target(output, width, height), paths, f, names)  # predictions
 
     # Compute statistics
     stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
@@ -223,6 +220,11 @@
     else:
         nt = torch.zeros(1)
 
+    # W&B logging
+    if plots and wandb:
+        wandb.log({"Images": wandb_images})
+        wandb.log({"Validation": [wandb.Image(str(x), caption=x.name) for x in sorted(save_dir.glob('test*.jpg'))]})
+
     # Print results
     pf = '%20s' + '%12.3g' * 6  # print format
     print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
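
The per-image W&B logging in this diff builds the box_data schema that wandb.Image expects for bounding-box overlays: pixel-domain corner coordinates, a class id, a caption, and a score per box. A self-contained sketch of the same structure, with made-up detections, class names, project name, and filename:

```python
import numpy as np
import wandb  # this sketch assumes wandb is installed and you are logged in

img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)  # stand-in for img[si]
names = {0: 'person', 1: 'car'}  # hypothetical class map
dets = [(100, 120, 300, 400, 0.91, 0), (350, 200, 500, 380, 0.64, 1)]  # x1, y1, x2, y2, conf, cls

# Same layout as the box_data list comprehension in test.py ("domain": "pixel" = unnormalized coords)
box_data = [{"position": {"minX": x1, "minY": y1, "maxX": x2, "maxY": y2},
             "class_id": int(cls),
             "box_caption": "%s %.3f" % (names[cls], conf),
             "scores": {"class_score": conf},
             "domain": "pixel"} for x1, y1, x2, y2, conf, cls in dets]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}}

wandb.init(project='YOLOv5-demo')  # illustrative project name
wandb.log({"Images": [wandb.Image(img, boxes=boxes, caption='example.jpg')]})  # hypothetical filename
wandb.finish()
```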
train.py CHANGED
@@ -34,6 +34,12 @@ from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_di
 
 logger = logging.getLogger(__name__)
 
+try:
+    import wandb
+except ImportError:
+    wandb = None
+    logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
+
 
 def train(hyp, opt, device, tb_writer=None, wandb=None):
     logger.info(f'Hyperparameters {hyp}')
@@ -54,6 +60,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         yaml.dump(vars(opt), f, sort_keys=False)
 
     # Configure
+    plots = not opt.evolve  # create plots
     cuda = device.type != 'cpu'
     init_seeds(2 + rank)
     with open(opt.data) as f:
@@ -122,6 +129,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
 
     # Logging
     if wandb and wandb.run is None:
+        opt.hyp = hyp  # add hyperparameters
         wandb_run = wandb.init(config=opt, resume="allow",
                                project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                name=save_dir.stem,
@@ -164,7 +172,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
         logger.info('Using SyncBatchNorm()')
 
-    # Exponential moving average
+    # EMA
     ema = ModelEMA(model) if rank in [-1, 0] else None
 
     # DDP mode
@@ -191,10 +199,12 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         c = torch.tensor(labels[:, 0])  # classes
         # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
         # model._initialize_biases(cf.to(device))
-        plot_labels(labels, save_dir=save_dir)
-        if tb_writer:
-            # tb_writer.add_hparams(hyp, {})  # causes duplicate https://github.com/ultralytics/yolov5/pull/384
-            tb_writer.add_histogram('classes', c, 0)
+        if plots:
+            plot_labels(labels, save_dir=save_dir)
+            if tb_writer:
+                tb_writer.add_histogram('classes', c, 0)
+            if wandb:
+                wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.png')]})
 
     # Anchors
     if not opt.noautoanchor:
@@ -298,14 +308,17 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
                 pbar.set_description(s)
 
                 # Plot
-                if ni < 3:
-                    f = str(save_dir / f'train_batch{ni}.jpg')  # filename
-                    result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
-                    # if tb_writer and result is not None:
-                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
-                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
+                if plots and ni < 3:
+                    f = save_dir / f'train_batch{ni}.jpg'  # filename
+                    plot_images(images=imgs, targets=targets, paths=paths, fname=f)
+                    # if tb_writer:
+                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
+                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
+                elif plots and ni == 3 and wandb:
+                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})
 
             # end batch ------------------------------------------------------------------------------------------------
+        # end epoch ----------------------------------------------------------------------------------------------------
 
         # Scheduler
         lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
@@ -325,7 +338,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
                                              single_cls=opt.single_cls,
                                              dataloader=testloader,
                                              save_dir=save_dir,
-                                             plots=epoch == 0 or final_epoch,  # plot first and last
+                                             plots=plots and final_epoch,
                                              log_imgs=opt.log_imgs if wandb else 0)
 
             # Write
@@ -380,11 +393,16 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
                     strip_optimizer(f2)  # strip optimizer
                     os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None  # upload
         # Finish
-        if not opt.evolve:
+        if plots:
             plot_results(save_dir=save_dir)  # save as results.png
+            if wandb:
+                wandb.log({"Results": [wandb.Image(str(save_dir / x), caption=x) for x in
+                                       ['results.png', 'precision-recall_curve.png']]})
         logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
+    else:
+        dist.destroy_process_group()
 
-    dist.destroy_process_group() if rank not in [-1, 0] else None
+    wandb.run.finish() if wandb and wandb.run else None
     torch.cuda.empty_cache()
     return results
 
@@ -413,7 +431,7 @@ if __name__ == '__main__':
     parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
-    parser.add_argument('--log-imgs', type=int, default=10, help='number of images for W&B logging, max 100')
+    parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
     parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
     parser.add_argument('--project', default='runs/train', help='save to project/name')
     parser.add_argument('--name', default='exp', help='save to project/name')
@@ -442,7 +460,7 @@ if __name__ == '__main__':
     assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
     opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
     opt.name = 'evolve' if opt.evolve else opt.name
-    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)  # increment run
+    opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run
 
     # DDP mode
     device = select_device(opt.device, batch_size=opt.batch_size)
@@ -465,20 +483,10 @@ if __name__ == '__main__':
     # Train
     logger.info(opt)
     if not opt.evolve:
-        tb_writer, wandb = None, None  # init loggers
+        tb_writer = None  # init loggers
         if opt.global_rank in [-1, 0]:
-            # Tensorboard
             logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
-            tb_writer = SummaryWriter(opt.save_dir)  # runs/train/exp
-
-            # W&B
-            try:
-                import wandb
-
-                assert os.environ.get('WANDB_DISABLED') != 'true'
-            except (ImportError, AssertionError):
-                logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
-
+            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
         train(hyp, opt, device, tb_writer, wandb)
 
     # Evolve hyperparameters (optional)
@@ -553,7 +561,7 @@ if __name__ == '__main__':
                 hyp[k] = round(hyp[k], 5)  # significant digits
 
             # Train mutation
-            results = train(hyp.copy(), opt, device)
+            results = train(hyp.copy(), opt, device, wandb=wandb)
 
             # Write mutation results
             print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
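
Most of the new wandb.log calls in train.py share one pattern: plots are still written to save_dir as ordinary image files, and W&B then picks them up by glob and logs them as a list of wandb.Image objects under a panel name ("Labels", "Mosaics", "Results"). A small sketch of that pattern, assuming a run is already active and the directory already contains the plotted files (the run directory path here is illustrative):

```python
from pathlib import Path

import wandb  # assumes wandb.init(...) has already been called elsewhere


def log_saved_plots(save_dir, panel, pattern):
    """Log every image under save_dir matching pattern to a single W&B media panel."""
    files = sorted(Path(save_dir).glob(pattern))
    if wandb.run and files:
        wandb.log({panel: [wandb.Image(str(f), caption=f.name) for f in files]})


# Mirrors the calls added in this commit (directory name is illustrative)
log_saved_plots('runs/train/exp', 'Labels', '*labels*.png')
log_saved_plots('runs/train/exp', 'Mosaics', 'train*.jpg')
```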
utils/plots.py CHANGED
@@ -158,13 +158,13 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
                 cls = int(classes[j])
                 color = colors[cls % len(colors)]
                 cls = names[cls] if names else cls
-                if labels or conf[j] > 0.3:  # 0.3 conf thresh
+                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                     label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
                     plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
 
         # Draw image filename labels
-        if paths is not None:
-            label = os.path.basename(paths[i])[:40]  # trim to 40 char
+        if paths:
+            label = Path(paths[i]).name[:40]  # trim to 40 char
             t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
             cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                         lineType=cv2.LINE_AA)
@@ -172,7 +172,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
         # Image border
         cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
 
-    if fname is not None:
+    if fname:
         r = min(1280. / max(h, w) / ns, 1.0)  # ratio to limit image size
         mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
         # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))  # cv2 save
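
The utils/plots.py tweaks are small but behavioral: prediction boxes are now drawn above a 0.25 confidence threshold instead of 0.3, and filename captions come from pathlib rather than os.path. A tiny sketch of both changes in isolation, using made-up detections and a made-up path:

```python
from pathlib import Path

# Predictions carry a confidence column (labels do not); only confident boxes get drawn
detections = [('person', 0.91), ('car', 0.18), ('dog', 0.40)]  # hypothetical (class, conf) pairs
drawn = [(cls, conf) for cls, conf in detections if conf > 0.25]  # 0.25 conf thresh
print(drawn)  # [('person', 0.91), ('dog', 0.4)]

# Filename captions now use pathlib and are trimmed to 40 characters
path = 'datasets/coco128/images/train2017/000000000009.jpg'  # illustrative path
print(Path(path).name[:40])  # 000000000009.jpg
```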