Lovish Singla committed
Commit 219d623 · unverified · 1 Parent(s): 7a11b1f

Update app.py

Files changed (1)
  1. app.py +45 -45
app.py CHANGED
@@ -9,61 +9,61 @@ import os
 
 # Function to extract VGG16 features from a frame
 def extract_vgg_features(frame):
-    frame = cv2.resize(frame, (224, 224))
-    img_array = image.img_to_array(frame)
-    img_array = np.expand_dims(img_array, axis=0)
-    img_array = preprocess_input(img_array)
-    features = VGG16(weights="imagenet", include_top=False, pooling="avg").predict(img_array)
-    return features.flatten()
+    frame = cv2.resize(frame, (224, 224)) # Resize frame to 224x224 (required by VGG16)
+    img_array = image.img_to_array(frame) # Convert frame to a NumPy array
+    img_array = np.expand_dims(img_array, axis=0) # Add batch dimension
+    img_array = preprocess_input(img_array) # Preprocess input for VGG16
+    features = VGG16(weights="imagenet", include_top=False, pooling="avg").predict(img_array) # Extract features
+    return features.flatten() # Flatten features to 1D array
 
 # Function to compute histogram difference
 def histogram_difference(frame1, frame2):
-    hist1 = cv2.calcHist([frame1], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
-    hist2 = cv2.calcHist([frame2], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
-    hist1 = cv2.normalize(hist1, hist1).flatten()
-    hist2 = cv2.normalize(hist2, hist2).flatten()
-    return cv2.compareHist(hist1, hist2, cv2.HISTCMP_BHATTACHARYYA)
+    hist1 = cv2.calcHist([frame1], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]) # Compute histogram for frame1
+    hist2 = cv2.calcHist([frame2], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]) # Compute histogram for frame2
+    hist1 = cv2.normalize(hist1, hist1).flatten() # Normalize and flatten histogram
+    hist2 = cv2.normalize(hist2, hist2).flatten() # Normalize and flatten histogram
+    return cv2.compareHist(hist1, hist2, cv2.HISTCMP_BHATTACHARYYA) # Compare histograms
 
 # Function to detect scene changes using histogram comparison
 def detect_scene_changes(video_path, threshold=0.2):
-    cap = cv2.VideoCapture(video_path)
+    cap = cv2.VideoCapture(video_path) # Open the video file
     prev_frame = None
     scene_change_frames = []
 
     while True:
-        ret, frame = cap.read()
+        ret, frame = cap.read() # Read the next frame
         if not ret:
             break
 
         if prev_frame is not None:
-            diff = histogram_difference(prev_frame, frame)
-            if diff > threshold:
+            diff = histogram_difference(prev_frame, frame) # Compute histogram difference
+            if diff > threshold: # If difference exceeds threshold, consider it a scene change
                 scene_change_frames.append(frame)
 
-        prev_frame = frame
+        prev_frame = frame # Update previous frame
 
-    cap.release()
-    return scene_change_frames[:5] # Limit to 20 frames
+    cap.release() # Release the video capture object
+    return scene_change_frames[:5] # Limit to 5 frames
 
 # Function to select frames based on motion
 def motion_based_selection(video_path, num_frames=5):
-    cap = cv2.VideoCapture(video_path)
+    cap = cv2.VideoCapture(video_path) # Open the video file
     prev_frame = None
     motion_scores = []
 
     while True:
-        ret, frame = cap.read()
+        ret, frame = cap.read() # Read the next frame
         if not ret:
             break
 
         if prev_frame is not None:
-            diff = cv2.absdiff(prev_frame, frame)
-            motion_score = np.mean(diff)
-            motion_scores.append((frame, motion_score))
+            diff = cv2.absdiff(prev_frame, frame) # Compute absolute difference between frames
+            motion_score = np.mean(diff) # Compute mean difference as motion score
+            motion_scores.append((frame, motion_score)) # Save frame and motion score
 
-        prev_frame = frame
+        prev_frame = frame # Update previous frame
 
-    cap.release()
+    cap.release() # Release the video capture object
 
     # Sort frames by motion score and select top frames
     motion_scores.sort(key=lambda x: x[1], reverse=True)
@@ -72,47 +72,47 @@ def motion_based_selection(video_path, num_frames=5):
 
 # Function to cluster frames using VGG16 features
 def cluster_frames(video_path, num_clusters=5):
-    cap = cv2.VideoCapture(video_path)
+    cap = cv2.VideoCapture(video_path) # Open the video file
     frames = []
     features = []
 
     while True:
-        ret, frame = cap.read()
+        ret, frame = cap.read() # Read the next frame
         if not ret:
             break
 
-        frames.append(frame)
-        feature = extract_vgg_features(frame)
-        features.append(feature)
+        frames.append(frame) # Save the frame
+        feature = extract_vgg_features(frame) # Extract features using VGG16
+        features.append(feature) # Save the features
 
-    cap.release()
+    cap.release() # Release the video capture object
 
     # Perform K-Means clustering
     kmeans = KMeans(n_clusters=num_clusters, random_state=42)
-    clusters = kmeans.fit_predict(features)
+    clusters = kmeans.fit_predict(features) # Cluster the frames
 
     # Select one frame from each cluster
     selected_frames = []
     for cluster_id in range(num_clusters):
-        cluster_indices = np.where(clusters == cluster_id)[0]
+        cluster_indices = np.where(clusters == cluster_id)[0] # Find frames in the cluster
         centroid_index = cluster_indices[0] # Select the first frame in the cluster
-        selected_frames.append(frames[centroid_index])
+        selected_frames.append(frames[centroid_index]) # Save the frame
 
     return selected_frames
 
 # Function to convert video to 15 FPS
 def convert_to_15fps(video_path, output_path):
-    cap = cv2.VideoCapture(video_path)
-    fps = int(cap.get(cv2.CAP_PROP_FPS))
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    cap = cv2.VideoCapture(video_path) # Open the video file
+    fps = int(cap.get(cv2.CAP_PROP_FPS)) # Get the original FPS
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # Get the frame width
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # Get the frame height
 
     # Define the codec and create VideoWriter object
-    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
-    out = cv2.VideoWriter(output_path, fourcc, 15, (width, height))
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v") # Use MP4 codec
+    out = cv2.VideoWriter(output_path, fourcc, 15, (width, height)) # Set output FPS to 15
 
     while True:
-        ret, frame = cap.read()
+        ret, frame = cap.read() # Read the next frame
         if not ret:
             break
 
@@ -123,13 +123,13 @@ def convert_to_15fps(video_path, output_path):
         for _ in range(int(fps / 15) - 1):
             cap.read()
 
-    cap.release()
-    out.release()
+    cap.release() # Release the video capture object
+    out.release() # Release the video writer object
 
 # Streamlit app
 def main():
     st.title("Video Frame Selection App")
-    st.write("Upload a 60-second video to extract the best 20 frames using three methods.")
+    st.write("Upload a 60-second video to extract the best 5 frames using three methods.")
 
     # Upload video
     uploaded_file = st.file_uploader("Upload a 60-second video", type=["mp4", "avi", "mov"])
@@ -167,4 +167,4 @@ def main():
 
 # Run the app
 if __name__ == "__main__":
-    main()
+    main()
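
For quick experimentation, the helpers in app.py can also be driven outside Streamlit. The sketch below is not part of the commit: sample.mp4 is a placeholder path, and it assumes motion_based_selection returns its top-scoring frames (its return statement falls outside the hunks shown above).

# Standalone usage sketch (assumption: app.py and its dependencies, i.e. opencv-python,
# numpy, scikit-learn, keras/tensorflow and streamlit, are importable from the working directory)
from app import detect_scene_changes, motion_based_selection, cluster_frames

video_path = "sample.mp4"  # hypothetical local input video

scene_frames = detect_scene_changes(video_path, threshold=0.2)    # at most 5 scene-change frames
motion_frames = motion_based_selection(video_path, num_frames=5)  # assumed: 5 highest-motion frames
clustered_frames = cluster_frames(video_path, num_clusters=5)     # one frame per K-Means cluster

print(len(scene_frames), len(motion_frames), len(clustered_frames))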