yiyixuxu committed on
Commit 2251212
1 Parent(s): 8b1feb9
Files changed (3)
  1. app.py +4 -5
  2. packages.txt +1 -2
  3. requirements.txt +1 -1
app.py CHANGED
@@ -49,9 +49,8 @@ def read_frames(dest_path):
         images.append(preprocess(image))
     return original_images, images
 
-def process_video_parallel(url, skip_frames, dest_path, process_number):
-    cap = cv2.VideoCapture(url)
-    num_processes = os.cpu_count()
+def process_video_parallel(video, skip_frames, dest_path, num_processes, process_number):
+    cap = cv2.VideoCapture(video)
     chunks_per_process = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) // (num_processes * skip_frames)
     count = skip_frames * chunks_per_process * process_number
     print(f"worker: {process_number}, process frames {count} ~ {skip_frames * chunks_per_process * (process_number + 1)}")
@@ -90,7 +89,7 @@ def vid2frames(url, sampling_interval=1, ext='mp4'):
     print('extracting frames...')
     n_workers = os.cpu_count()
     with Pool(n_workers) as pool:
-        pool.map(partial(process_video_parallel, video, skip_frames, dest_path), range(n_workers))
+        pool.map(partial(process_video_parallel, video, skip_frames, dest_path, n_workers), range(n_workers))
     return dest_path
 
 
@@ -138,7 +137,7 @@ def run_inference(url, sampling_interval, search_query):
     return(title, image_output)
 
 inputs = [gr.inputs.Textbox(label="Give us the link to your youtube video!"),
-          gr.Number(5),
+          gr.Number(5,label='sampling interval (seconds)'),
           gr.inputs.Textbox(label="What do you want to search?")]
 outputs = [
           gr.outputs.HTML(label=""), # To be used as title
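The core of the app.py change is passing the worker count into each worker instead of recomputing it there, so the chunk arithmetic and the pool.map(partial(...)) dispatch stay in sync. Below is a minimal, self-contained sketch of how the updated signature is consumed; the seek/imwrite loop body and the file naming are illustrative assumptions, and only the signature, the chunk arithmetic, and the dispatch line come from the diff above.

import os
import cv2
from functools import partial
from multiprocessing import Pool

def process_video_parallel(video, skip_frames, dest_path, num_processes, process_number):
    cap = cv2.VideoCapture(video)
    # Each worker owns an equal share of the sampled frames.
    chunks_per_process = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) // (num_processes * skip_frames)
    count = skip_frames * chunks_per_process * process_number
    cap.set(cv2.CAP_PROP_POS_FRAMES, count)  # jump to this worker's first frame (assumed)
    for _ in range(chunks_per_process):
        ok, frame = cap.read()
        if not ok:
            break
        cv2.imwrite(os.path.join(dest_path, f"{count}.jpg"), frame)  # hypothetical naming scheme
        count += skip_frames
        cap.set(cv2.CAP_PROP_POS_FRAMES, count)  # skip ahead to the next sampled frame (assumed)
    cap.release()

if __name__ == "__main__":
    video, skip_frames, dest_path = "video.mp4", 30, "frames"  # placeholder values
    os.makedirs(dest_path, exist_ok=True)
    n_workers = os.cpu_count()
    with Pool(n_workers) as pool:
        # n_workers is now forwarded through partial, matching the fixed call in vid2frames.
        pool.map(partial(process_video_parallel, video, skip_frames, dest_path, n_workers), range(n_workers))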
packages.txt CHANGED
@@ -1,2 +1 @@
-python3-opencv
-libssl-dev
+ffmpeg
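With python3-opencv dropped from the system packages (the Python wheel in requirements.txt below covers cv2), ffmpeg is the only remaining apt dependency, and youtube_dl typically shells out to it for merging and post-processing downloads. A hypothetical fail-fast check, not part of this commit, would be:

import shutil

# Fail early if the system package is missing rather than at download time.
if shutil.which("ffmpeg") is None:
    raise RuntimeError("ffmpeg not found; keep it listed in packages.txt so the Space image installs it")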
 
requirements.txt CHANGED
@@ -1,4 +1,4 @@
 git+https://github.com/openai/CLIP.git
 torch
 youtube_dl
-opencv-python
+opencv-python-headless
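Swapping opencv-python for opencv-python-headless keeps the cv2 API the app uses (VideoCapture and frame decoding) while omitting GUI support that a Space container cannot use. An illustrative sanity check, assuming nothing beyond what the diff shows:

import cv2

# The headless wheel still exposes the decoding API the app relies on;
# only GUI windowing support is left out of the build.
print(cv2.__version__)
assert hasattr(cv2, "VideoCapture")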