Create app.py
app.py
ADDED
@@ -0,0 +1,326 @@
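# app.py: Gradio app that flags anomalous facial-expression moments in a video.
# Faces are detected with MTCNN, aligned via MediaPipe FaceMesh, embedded with
# a FaceNet (InceptionResnetV1) model and scored for emotion with FER; an LSTM
# autoencoder then ranks sampled frames by reconstruction error.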
import os
import shutil  # used by organize_faces_by_person below
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from facenet_pytorch import InceptionResnetV1, MTCNN
import mediapipe as mp
from fer import FER
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import umap
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import gradio as gr
import tempfile

# Initialize models and other global variables
device = 'cuda' if torch.cuda.is_available() else 'cpu'

mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.999, 0.999, 0.999],
              min_face_size=100, selection_method='largest')
model = InceptionResnetV1(pretrained='vggface2').eval().to(device)
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.5)
emotion_detector = FER(mtcnn=False)

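# Convert an absolute frame number into an HH:MM:SS.mmm timecode using the
# source video's frame rate (desired_fps is accepted for call symmetry but
# does not affect the result).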
def frame_to_timecode(frame_num, original_fps, desired_fps):
    total_seconds = frame_num / original_fps
    hours = int(total_seconds // 3600)
    minutes = int((total_seconds % 3600) // 60)
    seconds = int(total_seconds % 60)
    milliseconds = int((total_seconds - int(total_seconds)) * 1000)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"

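# Compute a FaceNet embedding plus per-emotion scores for one aligned face
# crop. Pixels are rescaled to [-1, 1], the normalization InceptionResnetV1
# expects; when FER detects no face, every emotion score defaults to 0.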
def get_face_embedding_and_emotion(face_img):
    face_tensor = torch.tensor(face_img).permute(2, 0, 1).unsqueeze(0).float() / 255
    face_tensor = (face_tensor - 0.5) / 0.5
    face_tensor = face_tensor.to(device)
    with torch.no_grad():
        embedding = model(face_tensor)

    emotions = emotion_detector.detect_emotions(face_img)
    if emotions:
        emotion_dict = emotions[0]['emotions']
    else:
        emotion_dict = {e: 0 for e in ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']}

    return embedding.cpu().numpy().flatten(), emotion_dict

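# Rotate a face crop so that the eyes lie on a horizontal line. Eye positions
# come from MediaPipe FaceMesh landmarks (indices 33/160/158/144/153/145 and
# 362/385/387/263/373/380); returns None when no landmarks are found.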
def alignFace(img):
    img_raw = img.copy()
    results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    if not results.multi_face_landmarks:
        return None
    landmarks = results.multi_face_landmarks[0].landmark
    left_eye = np.array([[landmarks[i].x, landmarks[i].y] for i in (33, 160, 158, 144, 153, 145)])
    right_eye = np.array([[landmarks[i].x, landmarks[i].y] for i in (362, 385, 387, 263, 373, 380)])
    height, width = img_raw.shape[:2]
    # FaceMesh landmarks are normalized to [0, 1]; convert to pixel
    # coordinates before taking integer eye centers (an int cast on the
    # normalized values truncates them to 0 and makes the rotation a no-op).
    left_eye_center = (left_eye.mean(axis=0) * (width, height)).astype(np.int32)
    right_eye_center = (right_eye.mean(axis=0) * (width, height)).astype(np.int32)
    dY = right_eye_center[1] - left_eye_center[1]
    dX = right_eye_center[0] - left_eye_center[0]
    angle = np.degrees(np.arctan2(dY, dX))
    desired_angle = 0
    angle_diff = desired_angle - angle
    center = (width // 2, height // 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle_diff, 1)
    new_img = cv2.warpAffine(img_raw, rotation_matrix, (width, height))
    return new_img

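# Sample the video at roughly desired_fps, keep the single most prominent face
# per sampled frame (MTCNN probability >= 0.99), align and resize it to
# 160x160, and record its embedding and emotion scores keyed by frame number.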
def extract_and_align_faces_from_video(video_path, aligned_faces_folder, desired_fps):
    video = cv2.VideoCapture(video_path)
    if not video.isOpened():
        print(f"Error: Could not open video file at {video_path}")
        return {}, {}, desired_fps, 0
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    original_fps = video.get(cv2.CAP_PROP_FPS)
    if frame_count == 0:
        print(f"Error: Video file at {video_path} appears to be empty")
        return {}, {}, desired_fps, 0
    embeddings_by_frame = {}
    emotions_by_frame = {}

    # Guard against a zero step when desired_fps exceeds the source frame rate.
    step = max(1, int(original_fps / desired_fps))
    for frame_num in range(0, frame_count, step):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
        ret, frame = video.read()
        if not ret or frame is None:
            print(f"Error: Could not read frame {frame_num}")
            continue
        try:
            boxes, probs = mtcnn.detect(frame)
            if boxes is not None and len(boxes) > 0:
                box = boxes[0]
                if probs[0] >= 0.99:
                    x1, y1, x2, y2 = [int(b) for b in box]
                    face = frame[y1:y2, x1:x2]
                    aligned_face = alignFace(face)
                    if aligned_face is not None:
                        aligned_face_resized = cv2.resize(aligned_face, (160, 160))
                        output_path = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
                        cv2.imwrite(output_path, aligned_face_resized)
                        embedding, emotion = get_face_embedding_and_emotion(aligned_face_resized)
                        embeddings_by_frame[frame_num] = embedding
                        emotions_by_frame[frame_num] = emotion
        except Exception as e:
            print(f"Error processing frame {frame_num}: {str(e)}")
            continue

    video.release()
    return embeddings_by_frame, emotions_by_frame, desired_fps, original_fps

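# Group the face embeddings into at most three identity clusters using K-means
# on standardized features.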
def cluster_embeddings(embeddings):
    if len(embeddings) < 2:
        print("Not enough embeddings for clustering. Assigning all to one cluster.")
        return np.zeros(len(embeddings), dtype=int)
    n_clusters = min(3, len(embeddings))  # Use at most 3 clusters
    scaler = StandardScaler()
    embeddings_scaled = scaler.fit_transform(embeddings)
    kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    clusters = kmeans.fit_predict(embeddings_scaled)
    return clusters

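# Copy each saved face crop into a person_<cluster> subfolder so the crops can
# be reviewed per identity.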
def organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder):
    for (frame_num, embedding), cluster in zip(embeddings_by_frame.items(), clusters):
        person_folder = os.path.join(organized_faces_folder, f"person_{cluster}")
        os.makedirs(person_folder, exist_ok=True)
        src = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
        dst = os.path.join(person_folder, f"frame_{frame_num}_face.jpg")
        shutil.copy(src, dst)

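# Build the per-frame feature table for the largest cluster (taken to be the
# main subject): UMAP-reduced, min-max-normalized embedding components plus
# six emotion scores ('surprise' is not carried into the features). Despite
# the name, the function returns a DataFrame rather than writing a CSV; the
# raw embeddings are saved to face_embeddings.npy.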
def save_person_data_to_csv(embeddings_by_frame, emotions_by_frame, clusters, desired_fps, original_fps,
                            output_folder, num_components):
    emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'neutral']
    person_data = {}

    for (frame_num, embedding), (_, emotion_dict), cluster in zip(embeddings_by_frame.items(),
                                                                  emotions_by_frame.items(), clusters):
        if cluster not in person_data:
            person_data[cluster] = []
        person_data[cluster].append((frame_num, embedding, {e: emotion_dict[e] for e in emotions}))

    largest_cluster = max(person_data, key=lambda k: len(person_data[k]))

    data = person_data[largest_cluster]
    data.sort(key=lambda x: x[0])
    frames, embeddings, emotions_data = zip(*data)

    embeddings_array = np.array(embeddings)
    np.save(os.path.join(output_folder, 'face_embeddings.npy'), embeddings_array)

    reducer = umap.UMAP(n_components=num_components, random_state=1)
    embeddings_reduced = reducer.fit_transform(embeddings)

    scaler = MinMaxScaler(feature_range=(0, 1))
    embeddings_reduced_normalized = scaler.fit_transform(embeddings_reduced)

    timecodes = [frame_to_timecode(frame, original_fps, desired_fps) for frame in frames]
    times_in_minutes = [frame / (original_fps * 60) for frame in frames]

    df_data = {
        'Frame': frames,
        'Timecode': timecodes,
        'Time (Minutes)': times_in_minutes,
        'Embedding_Index': range(len(embeddings))
    }

    for i in range(num_components):
        df_data[f'Comp {i + 1}'] = embeddings_reduced_normalized[:, i]

    for emotion in emotions:
        df_data[emotion] = [e[emotion] for e in emotions_data]

    df = pd.DataFrame(df_data)

    return df, largest_cluster

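# A minimal sequence autoencoder: an LSTM encodes the whole sequence and a
# linear layer decodes the final hidden state back into a single feature
# vector, i.e. one summary of the sequence rather than a per-timestep
# reconstruction.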
class LSTMAutoencoder(nn.Module):
    def __init__(self, input_size, hidden_size=64, num_layers=2):
        super(LSTMAutoencoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, input_size)

    def forward(self, x):
        _, (hidden, _) = self.lstm(x)
        out = self.fc(hidden[-1])
        return out

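# Train the autoencoder on the feature matrix (first 85% of the timeline for
# training, the remainder for a validation loss) and score every frame by its
# MSE against the reconstructed summary vector; the num_anomalies frames with
# the highest error are flagged. batch_size is accepted but the model is
# currently trained on the whole sequence at once.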
def lstm_anomaly_detection(X, feature_columns, num_anomalies=10, epochs=100, batch_size=64):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    X = torch.FloatTensor(X).to(device)

    train_size = int(0.85 * len(X))
    X_train, X_val = X[:train_size], X[train_size:]

    model = LSTMAutoencoder(input_size=len(feature_columns)).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters())

    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()
        # The autoencoder returns a single (1, input_size) summary vector;
        # expand it explicitly so the MSE target shapes match.
        output_train = model(X_train.unsqueeze(0))
        loss_train = criterion(output_train.expand_as(X_train), X_train)
        loss_train.backward()
        optimizer.step()

        if len(X_val) > 0:
            model.eval()
            with torch.no_grad():
                output_val = model(X_val.unsqueeze(0))
                loss_val = criterion(output_val.expand_as(X_val), X_val)

    model.eval()
    with torch.no_grad():
        reconstructed = model(X.unsqueeze(0)).squeeze(0).cpu().numpy()

    # Per-frame anomaly score: squared distance from the reconstructed summary.
    mse = np.mean(np.power(X.cpu().numpy() - reconstructed, 2), axis=1)

    top_indices = mse.argsort()[-num_anomalies:][::-1]
    anomalies = np.zeros(len(mse), dtype=bool)
    anomalies[top_indices] = True

    return anomalies, mse, top_indices, model

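# Bar chart of per-frame anomaly scores with the top-ranked bars drawn in red
# and x-axis ticks labeled with timecodes.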
def plot_anomaly_scores(df, anomaly_scores, top_indices, title):
    fig, ax = plt.subplots(figsize=(16, 8))
    bars = ax.bar(range(len(df)), anomaly_scores, width=0.8)
    for i in top_indices:
        bars[i].set_color('red')
    ax.set_xlabel('Timecode')
    ax.set_ylabel('Anomaly Score')
    ax.set_title(f'Anomaly Scores Over Time ({title})')
    ax.xaxis.set_major_locator(MaxNLocator(nbins=100))
    ticks = ax.get_xticks()
    # Pin the tick positions before relabeling them with timecodes.
    ax.set_xticks(ticks)
    ax.set_xticklabels([df['Timecode'].iloc[int(tick)] if 0 <= tick < len(df) else '' for tick in ticks],
                       rotation=90, ha='right')
    plt.tight_layout()
    return fig

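# Same layout as the anomaly plot, but for a single emotion column; the ten
# highest-scoring frames are drawn in red.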
def plot_emotion(df, emotion):
    fig, ax = plt.subplots(figsize=(16, 8))
    values = df[emotion].values
    bars = ax.bar(range(len(df)), values, width=0.8)
    top_10_indices = np.argsort(values)[-10:]
    for i, bar in enumerate(bars):
        if i in top_10_indices:
            bar.set_color('red')
    ax.set_xlabel('Timecode')
    ax.set_ylabel(f'{emotion.capitalize()} Score')
    ax.set_title(f'{emotion.capitalize()} Scores Over Time')
    ax.xaxis.set_major_locator(MaxNLocator(nbins=100))
    ticks = ax.get_xticks()
    ax.set_xticks(ticks)
    ax.set_xticklabels([df['Timecode'].iloc[int(tick)] if 0 <= tick < len(df) else '' for tick in ticks],
                       rotation=90, ha='right')
    plt.tight_layout()
    return fig

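# End-to-end pipeline behind the Gradio app: extract and align faces, cluster
# them by identity, build the feature table for the largest cluster, run the
# anomaly detector, and render the plots. All intermediate files live in a
# temporary directory that is removed once processing finishes.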
def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
    with tempfile.TemporaryDirectory() as temp_dir:
        aligned_faces_folder = os.path.join(temp_dir, 'aligned_faces')
        organized_faces_folder = os.path.join(temp_dir, 'organized_faces')
        os.makedirs(aligned_faces_folder, exist_ok=True)
        os.makedirs(organized_faces_folder, exist_ok=True)

        progress(0.1, "Extracting and aligning faces")
        embeddings_by_frame, emotions_by_frame, _, original_fps = extract_and_align_faces_from_video(
            video_path, aligned_faces_folder, desired_fps)

        if not embeddings_by_frame:
            return "No faces were extracted from the video.", None, None, None, None

        progress(0.3, "Clustering embeddings")
        embeddings = list(embeddings_by_frame.values())
        clusters = cluster_embeddings(embeddings)

        progress(0.4, "Organizing faces")
        organize_faces_by_person(embeddings_by_frame, clusters, aligned_faces_folder, organized_faces_folder)

        progress(0.5, "Saving person data")
        df, largest_cluster = save_person_data_to_csv(embeddings_by_frame, emotions_by_frame, clusters,
                                                      desired_fps, original_fps, temp_dir, num_components)

        progress(0.6, "Performing anomaly detection")
        feature_columns = [col for col in df.columns
                           if col not in ['Frame', 'Timecode', 'Time (Minutes)', 'Embedding_Index']]
        anomalies_all, anomaly_scores_all, top_indices_all, _ = lstm_anomaly_detection(
            df[feature_columns].values, feature_columns, num_anomalies=num_anomalies, batch_size=batch_size)

        progress(0.8, "Generating plots")
        anomaly_plot = plot_anomaly_scores(df, anomaly_scores_all, top_indices_all, "All Features")
        emotion_plots = [plot_emotion(df, emotion) for emotion in ['fear', 'sad', 'angry']]

        progress(0.9, "Preparing results")
        results = f"Top {num_anomalies} anomalies (All Features):\n"
        results += "\n".join([f"{score:.4f} at {timecode}" for score, timecode in
                              zip(anomaly_scores_all[top_indices_all], df['Timecode'].iloc[top_indices_all].values)])

        progress(1.0, "Complete")
        return results, anomaly_plot, *emotion_plots

# Gradio interface
iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(),
        gr.Slider(minimum=1, maximum=20, step=1, value=10, label="Number of Anomalies"),
        gr.Slider(minimum=2, maximum=5, step=1, value=3, label="Number of Components"),
        gr.Slider(minimum=1, maximum=30, step=1, value=20, label="Desired FPS"),
        gr.Slider(minimum=1, maximum=64, step=1, value=16, label="Batch Size")
    ],
    outputs=[
        gr.Textbox(label="Anomaly Detection Results"),
        gr.Plot(label="Anomaly Scores"),
        gr.Plot(label="Fear Scores"),
        gr.Plot(label="Sad Scores"),
        gr.Plot(label="Angry Scores")
    ],
    title="Video Anomaly Detection",
    description="Upload a video to detect anomalies in facial expressions and emotions. Adjust parameters as needed."
)

iface.launch()
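
# Note: beyond the standard scientific stack, this app assumes the following
# PyPI packages are installed (usual pip names): facenet-pytorch, mediapipe,
# fer, umap-learn, opencv-python, scikit-learn, gradio, torch, pandas,
# matplotlib.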