dylan-plummer committed on
Commit
87599df
·
1 Parent(s): cf90a22

set up hls stream interface

Browse files
Files changed (2) hide show
  1. app.py +27 -8
  2. hls_download.py +35 -0
app.py CHANGED
@@ -22,6 +22,8 @@ import torchvision.transforms.functional as F
22
  from huggingface_hub import hf_hub_download
23
  from huggingface_hub import HfApi
24
 
 
 
25
  plt.style.use('dark_background')
26
 
27
  onnx_file = hf_hub_download(repo_id="dylanplummer/ropenet", filename="nextjump.onnx", repo_type="model", token=os.environ['DATASET_SECRET'])
@@ -50,12 +52,14 @@ def sigmoid(x):
50
 
51
 
52
  @spaces.GPU()
53
- def inference(x, count_only_api, api_key,
54
  img_size=288, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
55
  miss_threshold=0.8, marks_threshold=0.5, median_pred_filter=True, center_crop=True, both_feet=True,
56
  api_call=False,
57
  progress=gr.Progress()):
58
  progress(0, desc="Starting...")
 
 
59
  # check if GPU is available
60
  if torch.cuda.is_available():
61
  providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),
@@ -330,9 +334,13 @@ DESCRIPTION += '\nDemo created by [Dylan Plummer](https://dylan-plummer.github.i
330
 
331
  with gr.Blocks(theme='WeixuanYuan/Soft_dark') as demo:
332
  gr.Markdown(DESCRIPTION)
333
- in_video = gr.PlayableVideo(label="Input Video", elem_id='input-video', format='mp4',
334
- width=400, height=400, interactive=True, container=True,
335
- max_length=150)
 
 
 
 
336
 
337
  with gr.Row():
338
  run_button = gr.Button(value="Run", elem_id='run-button', scale=1)
@@ -374,8 +382,19 @@ with gr.Blocks(theme='WeixuanYuan/Soft_dark') as demo:
374
 
375
  demo_inference = partial(inference, count_only_api=False, api_key=None)
376
 
 
 
 
 
 
 
 
 
 
 
 
377
  gr.Examples(examples=[
378
- [os.path.join(os.path.dirname(__file__), "files", "dylan.mp4")],
379
  #[os.path.join(os.path.dirname(__file__), "files", "train14.mp4")],
380
  #[os.path.join(os.path.dirname(__file__), "files", "train_17.mp4")],
381
  #[os.path.join(os.path.dirname(__file__), "files", "train13.mp4")],
@@ -388,13 +407,13 @@ with gr.Blocks(theme='WeixuanYuan/Soft_dark') as demo:
388
  #[os.path.join(os.path.dirname(__file__), "files", "train_66.mp4")],
389
  #[os.path.join(os.path.dirname(__file__), "files", "train_21.mp4")]
390
  ],
391
- inputs=[in_video],
392
  outputs=[out_text, out_plot, out_hist, out_event_type_dist],
393
  fn=demo_inference, cache_examples=os.getenv('SYSTEM') == 'spaces')
394
 
395
- run_button.click(demo_inference, [in_video], outputs=[out_text, out_plot, out_hist, out_event_type_dist])
396
  api_inference = partial(inference, api_call=True)
397
- api_dummy_button.click(api_inference, [in_video, count_only, api_token], outputs=[period_length], api_name='inference')
398
 
399
 
400
  if __name__ == "__main__":
 
22
  from huggingface_hub import hf_hub_download
23
  from huggingface_hub import HfApi
24
 
25
+ from hls_download import download_clips
26
+
27
  plt.style.use('dark_background')
28
 
29
  onnx_file = hf_hub_download(repo_id="dylanplummer/ropenet", filename="nextjump.onnx", repo_type="model", token=os.environ['DATASET_SECRET'])
 
52
 
53
 
54
  @spaces.GPU()
55
+ def inference(stream_url, start_time, end_time, count_only_api, api_key,
56
  img_size=288, seq_len=64, stride_length=32, stride_pad=3, batch_size=4,
57
  miss_threshold=0.8, marks_threshold=0.5, median_pred_filter=True, center_crop=True, both_feet=True,
58
  api_call=False,
59
  progress=gr.Progress()):
60
  progress(0, desc="Starting...")
61
+
62
+ x = download_clips(stream_url, os.getcwd(), int(start_time), int(end_time))
63
  # check if GPU is available
64
  if torch.cuda.is_available():
65
  providers = [("CUDAExecutionProvider", {"device_id": torch.cuda.current_device(),
 
334
 
335
  with gr.Blocks(theme='WeixuanYuan/Soft_dark') as demo:
336
  gr.Markdown(DESCRIPTION)
337
+ # in_video = gr.PlayableVideo(label="Input Video", elem_id='input-video', format='mp4',
338
+ # width=400, height=400, interactive=True, container=True,
339
+ # max_length=150)
340
+ with gr.Row():
341
+ in_stream_url = gr.Textbox(label="Stream URL", elem_id='stream-url', visible=True)
342
+ in_stream_start = gr.Textbox(label="Start Time", elem_id='stream-start', visible=True)
343
+ in_stream_end = gr.Textbox(label="End Time", elem_id='stream-end', visible=True)
344
 
345
  with gr.Row():
346
  run_button = gr.Button(value="Run", elem_id='run-button', scale=1)
 
382
 
383
  demo_inference = partial(inference, count_only_api=False, api_key=None)
384
 
385
+ # playlist_url = "https://hiemdall-dev2.azurewebsites.net/api/playlist/"
386
+
387
+ # stream_urls = [
388
+ # #f"{playlist_url}rec_v3v44xW3/vod",
389
+ # f"{playlist_url}rec_DA6NyTCw/vod",
390
+ # f"{playlist_url}rec_aY68fV8L/vod",
391
+ # f"{playlist_url}rec_d9W4ugUl/vod",
392
+ # f"{playlist_url}rec_rTxSIdcO/vod",
393
+ # f"{playlist_url}rec_rd2FAyUo/vod",
394
+ # ]
395
+
396
  gr.Examples(examples=[
397
+ ['https://hiemdall-dev2.azurewebsites.net/api/playlist/rec_DA6NyTCw/vod', '0', '30'],
398
  #[os.path.join(os.path.dirname(__file__), "files", "train14.mp4")],
399
  #[os.path.join(os.path.dirname(__file__), "files", "train_17.mp4")],
400
  #[os.path.join(os.path.dirname(__file__), "files", "train13.mp4")],
 
407
  #[os.path.join(os.path.dirname(__file__), "files", "train_66.mp4")],
408
  #[os.path.join(os.path.dirname(__file__), "files", "train_21.mp4")]
409
  ],
410
+ inputs=[in_stream_url, in_stream_start, in_stream_end],
411
  outputs=[out_text, out_plot, out_hist, out_event_type_dist],
412
  fn=demo_inference, cache_examples=os.getenv('SYSTEM') == 'spaces')
413
 
414
+ run_button.click(demo_inference, [in_stream_url, in_stream_start, in_stream_end], outputs=[out_text, out_plot, out_hist, out_event_type_dist])
415
  api_inference = partial(inference, api_call=True)
416
+ api_dummy_button.click(api_inference, [in_stream_url, in_stream_start, in_stream_end, count_only, api_token], outputs=[period_length], api_name='inference')
417
 
418
 
419
  if __name__ == "__main__":
hls_download.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+ import os
3
+ # import m3u8
4
+ # import numpy as np
5
+ # import matplotlib.pyplot as plt
6
+ # from tqdm import tqdm
7
+ # from moviepy.editor import VideoFileClip
8
+ # from scipy.signal import correlate, find_peaks
9
+ # from scipy.io import wavfile
10
+
11
+
12
def download_clips(stream_url, out_dir, start_time, end_time, resize=True):
    """Download a time window of an HLS stream as a 30 fps mp4 clip.

    Parameters
    ----------
    stream_url : str
        URL of the HLS playlist (or any ffmpeg-readable input).
    out_dir : str
        Directory the clip is written into. The filename is derived from the
        current number of entries in the directory
        (``train_<count>.mp4``) — NOTE(review): this can collide/overwrite if
        unrelated files are added or removed between calls.
    start_time, end_time : int
        Clip window in seconds within the stream.
    resize : bool, optional
        If True (default) scale the output to 540p, otherwise 720p.

    Returns
    -------
    str
        Path of the written mp4 file.

    Raises
    ------
    subprocess.CalledProcessError
        If ffmpeg exits with a non-zero status.
    """
    output_file = os.path.join(out_dir, f"train_{len(os.listdir(out_dir))}.mp4")
    # Only the output height differs between the two modes; -2 lets ffmpeg
    # pick a width that preserves aspect ratio while staying even.
    height = 540 if resize else 720
    subprocess.run(
        ['ffmpeg', '-i', stream_url,
         # Pad the window: up to 2 s of lead-in (clamped so -ss never goes
         # negative for early start times) and 11 s of tail so action right
         # at the end of the window is not cut off.
         '-ss', str(max(0, start_time - 2)),
         '-to', str(end_time + 11),
         '-r', str(30),  # force a constant 30 fps output
         '-vcodec', 'libx264',
         '-crf', str(24),
         '-preset', 'fast',
         '-vf', f'scale=-2:{height}',
         '-c:a', 'copy', output_file],
        check=True,  # fail loudly instead of returning a path to a missing file
    )
    return output_file