igornishka and glenn-jocher committed
Commit 44f42b1
Parent: 7aeef2d

changed prints to logging in utils/datasets (#1315)


Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>

Files changed (1):
  1. utils/datasets.py +14 -12
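
Every hunk below applies the same two-step pattern: one module-level logger is created with logging.getLogger(__name__), and each print('... %g ...' % args, end='') becomes logger.debug('... %g ...', *args), with the arguments handed to the logger instead of being %-formatted up front (the end='' continuation disappears because each log record is its own line). A minimal standalone sketch of that pattern, outside the repo; process_image and the sample path are illustrative only:

import logging

logger = logging.getLogger(__name__)  # one logger per module, named after the module


def process_image(i, n, path):
    # Before the commit: print('image %g/%g %s: ' % (i, n, path), end='')
    # After: arguments are passed separately, so the message string is only
    # built if a handler actually emits the record (lazy %-interpolation).
    logger.debug('image %g/%g %s: ', i, n, path)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)  # without this, DEBUG records are dropped
    process_image(1, 3, 'data/images/example.jpg')  # illustrative path
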
utils/datasets.py CHANGED

@@ -1,6 +1,7 @@
 # Dataset utils and dataloaders
 
 import glob
+import logging
 import math
 import os
 import random
@@ -21,6 +22,8 @@ from tqdm import tqdm
 from utils.general import xyxy2xywh, xywh2xyxy
 from utils.torch_utils import torch_distributed_zero_first
 
+logger = logging.getLogger(__name__)
+
 # Parameters
 help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
 img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']  # acceptable image suffixes
@@ -165,14 +168,14 @@ class LoadImages:  # for inference
                     ret_val, img0 = self.cap.read()
 
             self.frame += 1
-            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
+            logger.debug('video %g/%g (%g/%g) %s: ', self.count + 1, self.nf, self.frame, self.nframes, path)
 
         else:
             # Read image
             self.count += 1
             img0 = cv2.imread(path)  # BGR
             assert img0 is not None, 'Image Not Found ' + path
-            print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
+            logger.debug('image %g/%g %s: ', self.count, self.nf, path)
 
         # Padded resize
         img = letterbox(img0, new_shape=self.img_size)[0]
@@ -234,7 +237,7 @@ class LoadWebcam:  # for inference
         # Print
         assert ret_val, 'Camera Error %s' % self.pipe
         img_path = 'webcam.jpg'
-        print('webcam %g: ' % self.count, end='')
+        logger.debug('webcam %g: ', self.count)
 
         # Padded resize
         img = letterbox(img0, new_shape=self.img_size)[0]
@@ -265,7 +268,7 @@ class LoadStreams:  # multiple IP or RTSP cameras
         self.sources = sources
         for i, s in enumerate(sources):
             # Start the thread to read frames from the video stream
-            print('%g/%g: %s... ' % (i + 1, n, s), end='')
+            logger.debug('%g/%g: %s... ', i + 1, n, s)
             cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
             assert cap.isOpened(), 'Failed to open %s' % s
             w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -273,15 +276,14 @@ class LoadStreams:  # multiple IP or RTSP cameras
             fps = cap.get(cv2.CAP_PROP_FPS) % 100
             _, self.imgs[i] = cap.read()  # guarantee first frame
             thread = Thread(target=self.update, args=([i, cap]), daemon=True)
-            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
+            logger.debug(' success (%gx%g at %.2f FPS).', w, h, fps)
             thread.start()
-        print('')  # newline
 
         # check for common shapes
         s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
         self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
         if not self.rect:
-            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
+            logger.warning('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
 
     def update(self, index, cap):
         # Read next stream frame in a daemon thread
@@ -418,7 +420,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                 assert (l >= 0).all(), 'negative labels: %s' % file
                 assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                 if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
-                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
+                    nd += 1  # logger.warning('WARNING: duplicate rows in %s', self.label_files[i])  # duplicate rows
                 if single_cls:
                     l[:, 0] = 0  # force dataset into single-class mode
                 self.labels[i] = l
@@ -455,7 +457,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                         b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                         assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
             else:
-                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
+                ne += 1  # logger.info('empty labels for image %s', self.img_files[i])  # file empty
                 # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove
 
             if rank in [-1, 0]:
@@ -463,7 +465,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                     cache_path, nf, nm, ne, nd, n)
         if nf == 0:
             s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
-            print(s)
+            logger.info(s)
             assert not augment, '%s. Can not train without labels.' % s
 
         # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
@@ -496,7 +498,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                     l = np.zeros((0, 5), dtype=np.float32)
                 x[img] = [l, shape]
             except Exception as e:
-                print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
+                logger.warning('WARNING: Ignoring corrupted image and/or label %s: %s', img, e)
 
         x['hash'] = get_hash(self.label_files + self.img_files)
         torch.save(x, path)  # save for next time
@@ -507,7 +509,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
 
     # def __iter__(self):
     #     self.count = -1
-    #     print('ran dataset iter')
+    #     logger.info('ran dataset iter')
     #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self
 
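
A side effect worth noting, since it is not obvious from the diff alone: the per-image and per-frame strings that used to be printed unconditionally are now emitted at DEBUG level, and Python's logging machinery drops DEBUG and INFO records unless the application configures a level and handler (only WARNING and above reach the default last-resort handler). A hedged sketch of how a script using this revision could surface those messages again; this configuration is not part of the commit, and 'utils.datasets' assumes the module is imported under that package path:

import logging

# Configure the root logger before the dataloaders start producing records.
logging.basicConfig(format='%(message)s',   # keep print-style output, no level prefix
                    level=logging.INFO)     # show INFO and above from the whole program

# Opt in to the verbose per-image/per-frame DEBUG output from this one module only.
logging.getLogger('utils.datasets').setLevel(logging.DEBUG)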