Ben Milanko, glenn-jocher committed on
Commit e2b7bc0
1 Parent(s): 9029759

YouTube Livestream Detection (#2752)


* YouTube livestream detection

* dependency update to auto-install pafy

* Remove print

* include youtube_dl in deps

* PEP8 reformat

* youtube url check fix

* reduce lines

* add comment

* update check_requirements

* stream framerate fix

* Update README.md

* cleanup

* PEP8

* remove cap.retrieve() failure code

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>

Files changed (4)
  1. README.md +2 -3
  2. detect.py +1 -1
  3. utils/datasets.py +15 -8
  4. utils/general.py +12 -8
README.md CHANGED
@@ -92,9 +92,8 @@ $ python detect.py --source 0  # webcam
                             file.mp4  # video
                             path/  # directory
                             path/*.jpg  # glob
-                            rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa  # rtsp stream
-                            rtmp://192.168.1.105/live/test  # rtmp stream
-                            http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8  # http stream
+                            'https://youtu.be/NUsoVlDFqZg'  # YouTube video
+                            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
 ```
 
 To run inference on example images in `data/images`:
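With this change a YouTube page URL works directly as `--source`, e.g. `python detect.py --source 'https://youtu.be/NUsoVlDFqZg'` (the URL from the README example above); the `detect.py` and `utils/datasets.py` changes below handle the routing and decoding.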
detect.py CHANGED
@@ -19,7 +19,7 @@ def detect(save_img=False):
     source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
-        ('rtsp://', 'rtmp://', 'http://'))
+        ('rtsp://', 'rtmp://', 'http://', 'https://'))
 
     # Directories
     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
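The only change here is the added `'https://'` prefix, which routes HTTPS URLs (including YouTube links) to the stream loader. A minimal standalone sketch of the check; the helper name `is_stream_source` is hypothetical, introduced purely for illustration, since `detect.py` assigns this expression inline to `webcam`:

```python
def is_stream_source(source: str) -> bool:
    # Numeric strings ('0') select a local webcam, '.txt' files list multiple
    # streams, and URL prefixes (now including https://) select network streams.
    return source.isnumeric() or source.endswith('.txt') or \
        source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))

assert is_stream_source('https://youtu.be/NUsoVlDFqZg')  # YouTube URLs now match
assert is_stream_source('rtsp://example.com/media.mp4')
assert not is_stream_source('data/images/bus.jpg')       # files still use LoadImages
```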
utils/datasets.py CHANGED
@@ -20,8 +20,8 @@ from PIL import Image, ExifTags
 from torch.utils.data import Dataset
 from tqdm import tqdm
 
-from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
-    clean_str
+from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
+    resample_segments, clean_str
 from utils.torch_utils import torch_distributed_zero_first
 
 # Parameters
@@ -275,14 +275,20 @@ class LoadStreams:  # multiple IP or RTSP cameras
         for i, s in enumerate(sources):
             # Start the thread to read frames from the video stream
             print(f'{i + 1}/{n}: {s}... ', end='')
-            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
+            url = eval(s) if s.isnumeric() else s
+            if 'youtube.com/' in url or 'youtu.be/' in url:  # if source is YouTube video
+                check_requirements(('pafy', 'youtube_dl'))
+                import pafy
+                url = pafy.new(url).getbest(preftype="mp4").url
+            cap = cv2.VideoCapture(url)
             assert cap.isOpened(), f'Failed to open {s}'
             w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
             h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-            fps = cap.get(cv2.CAP_PROP_FPS) % 100
+            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
+
             _, self.imgs[i] = cap.read()  # guarantee first frame
             thread = Thread(target=self.update, args=([i, cap]), daemon=True)
-            print(f' success ({w}x{h} at {fps:.2f} FPS).')
+            print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
             thread.start()
         print('')  # newline
 
@@ -303,7 +309,7 @@ class LoadStreams:  # multiple IP or RTSP cameras
                 success, im = cap.retrieve()
                 self.imgs[index] = im if success else self.imgs[index] * 0
                 n = 0
-            time.sleep(0.01)  # wait time
+            time.sleep(1 / self.fps)  # wait time
 
     def __iter__(self):
         self.count = -1
@@ -444,7 +450,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                 gb += self.imgs[i].nbytes
                 pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
             pbar.close()
-
+
     def cache_labels(self, path=Path('./labels.cache'), prefix=''):
         # Cache dataset labels, check images and read shapes
         x = {}  # dict
@@ -489,7 +495,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
             pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
                         f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
         pbar.close()
-
+
         if nf == 0:
             print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
 
@@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes()
                     b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                     assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
 
+
 def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
     """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
     Usage: from utils.datasets import *; autosplit('../coco128')
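Two behavioral changes here beyond the whitespace cleanup: YouTube page URLs are resolved to direct media URLs via `pafy` before being handed to OpenCV, and the reader thread now sleeps `1 / self.fps` between grabs instead of a fixed 0.01 s, pacing reads at the stream's native frame rate (the "stream framerate fix" commit). A minimal single-stream sketch of the same path, without the repo's threading, assuming `pafy` and `youtube_dl` are installed and reusing the README's example URL:

```python
import time

import cv2
import pafy  # installed on demand by check_requirements(('pafy', 'youtube_dl'))

url = 'https://youtu.be/NUsoVlDFqZg'
if 'youtube.com/' in url or 'youtu.be/' in url:  # same check LoadStreams uses
    url = pafy.new(url).getbest(preftype='mp4').url  # resolve to a direct .mp4 URL

cap = cv2.VideoCapture(url)
assert cap.isOpened(), f'Failed to open {url}'
fps = cap.get(cv2.CAP_PROP_FPS) % 100 or 30  # % 100 as in the repo; fall back to 30 if metadata is missing

while cap.grab():                 # advance the stream by one frame
    success, im = cap.retrieve()  # decode the grabbed frame
    if not success:
        break
    # ... run inference on `im` here ...
    time.sleep(1 / fps)           # pace reads at the native frame rate
```

Note that the repo's `update()` thread grabs every frame but only decodes every 4th (hence the `n = 0` reset visible in the hunk above); the sketch decodes every frame for simplicity.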
utils/general.py CHANGED
@@ -91,17 +91,20 @@ def check_git_status():
         print(e)
 
 
-def check_requirements(file='requirements.txt', exclude=()):
-    # Check installed dependencies meet requirements
+def check_requirements(requirements='requirements.txt', exclude=()):
+    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
     import pkg_resources as pkg
     prefix = colorstr('red', 'bold', 'requirements:')
-    file = Path(file)
-    if not file.exists():
-        print(f"{prefix} {file.resolve()} not found, check failed.")
-        return
+    if isinstance(requirements, (str, Path)):  # requirements.txt file
+        file = Path(requirements)
+        if not file.exists():
+            print(f"{prefix} {file.resolve()} not found, check failed.")
+            return
+        requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
+    else:  # list or tuple of packages
+        requirements = [x for x in requirements if x not in exclude]
 
     n = 0  # number of packages updates
-    requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
     for r in requirements:
         try:
             pkg.require(r)
@@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()):
             print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
 
     if n:  # if packages updated
-        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \
+        source = file.resolve() if 'file' in locals() else requirements
+        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
             f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
         print(emojis(s))  # emoji-safe
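`check_requirements` now dispatches on type: a `str` or `Path` is parsed as a requirements file, while any other iterable is treated as a list of package specifiers, and the `'file' in locals()` test picks the right name for the update message. A brief usage sketch of both forms (the tuple call is the one `LoadStreams` makes in this commit; the file call is the pre-existing default):

```python
from utils.general import check_requirements

# File form (original behavior): verify every pin in requirements.txt,
# pip-installing anything missing or outdated.
check_requirements('requirements.txt')

# New tuple form: verify an ad-hoc set of packages, exactly as
# LoadStreams does before importing pafy.
check_requirements(('pafy', 'youtube_dl'))
```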