Create app.py
app.py
ADDED
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from sklearn.metrics.pairwise import cosine_similarity
from filterpy.kalman import KalmanFilter
import gradio as gr

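# Dependencies assumed available at runtime: numpy, opencv-python, tensorflow,
# scikit-learn, filterpy and gradio (on Spaces these would be pinned in a
# requirements.txt next to this file).
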
# Path to the frozen inference graph (a TF1-style export, e.g. from the
# TensorFlow Object Detection API)
frozen_graph_path = "frozen_inference_graph.pb"

# Load the frozen TensorFlow model
with tf.io.gfile.GFile(frozen_graph_path, "rb") as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

# Convert the frozen graph to a callable function
def wrap_frozen_graph(graph_def, inputs, outputs):
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")
    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    return wrapped_import.prune(
        tf.nest.map_structure(wrapped_import.graph.as_graph_element, inputs),
        tf.nest.map_structure(wrapped_import.graph.as_graph_element, outputs))

# Input and output tensor names, as exported by the Object Detection API
inputs = ["image_tensor:0"]
outputs = ["detection_boxes:0", "detection_scores:0", "detection_classes:0", "num_detections:0"]

# Get the detection function
detection_fn = wrap_frozen_graph(graph_def, inputs, outputs)

# TensorFlow function for detection (expects a uint8 batch of HxWx3 images)
@tf.function(input_signature=[tf.TensorSpec(shape=[None, None, None, 3], dtype=tf.uint8)])
def detect_objects(image):
    return detection_fn(image)

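# The pruned function returns tensors in the order requested above:
# detection_boxes (1, N, 4) normalized as [ymin, xmin, ymax, xmax],
# detection_scores (1, N), detection_classes (1, N) and num_detections (1,),
# following the TF Object Detection API convention.
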
# Load ResNet50 (ImageNet weights, average-pooled) as an appearance feature extractor
resnet_model = ResNet50(weights="imagenet", include_top=False, pooling="avg")

# Global tracking state: appearance features, identities, display colors and
# one Kalman filter per identity
person_features = []
person_identities = []
person_colors = {}
kalman_filters = {}
next_person_id = 1  # Next unique ID to assign to a new person

# Generate a deterministic color for each person ID
def get_color(person_id):
    rng = np.random.default_rng(person_id)  # Seed per ID without touching the global RNG
    color = rng.integers(0, 256, size=3)
    return (int(color[0]), int(color[1]), int(color[2]))  # OpenCV expects a tuple of Python ints

def extract_features(person_roi):
    # Resize and preprocess the ROI for ResNet50 input
    person_roi_resized = cv2.resize(person_roi, (224, 224))
    person_roi_preprocessed = preprocess_input(person_roi_resized.astype(np.float32))

    # Add batch dimension for ResNet50 input
    input_tensor = np.expand_dims(person_roi_preprocessed, axis=0)

    # Extract the appearance embedding
    features = resnet_model.predict(input_tensor, verbose=0)
    return features

|
67 |
+
def initialize_kalman_filter(bbox):
|
68 |
+
kf = KalmanFilter(dim_x=7, dim_z=4)
|
69 |
+
kf.F = np.array([[1, 0, 0, 0, 1, 0, 0],
|
70 |
+
[0, 1, 0, 0, 0, 1, 0],
|
71 |
+
[0, 0, 1, 0, 0, 0, 1],
|
72 |
+
[0, 0, 0, 1, 0, 0, 0],
|
73 |
+
[0, 0, 0, 0, 1, 0, 0],
|
74 |
+
[0, 0, 0, 0, 0, 1, 0],
|
75 |
+
[0, 0, 0, 0, 0, 0, 1]])
|
76 |
+
kf.H = np.array([[1, 0, 0, 0, 0, 0, 0],
|
77 |
+
[0, 1, 0, 0, 0, 0, 0],
|
78 |
+
[0, 0, 0, 1, 0, 0, 0],
|
79 |
+
[0, 0, 0, 0, 0, 1, 0]])
|
80 |
+
kf.R[2:, 2:] *= 10.
|
81 |
+
kf.P[4:, 4:] *= 1000.
|
82 |
+
kf.P *= 10.
|
83 |
+
kf.Q[-1, -1] *= 0.01
|
84 |
+
kf.Q[4:, 4:] *= 0.01
|
85 |
+
kf.x[:4] = bbox.reshape((4, 1))
|
86 |
+
return kf
|
87 |
+
|
88 |
+
def predict_bbox(kf):
|
89 |
+
kf.predict()
|
90 |
+
return kf.x[:4].reshape((4,))
|
91 |
+
|
92 |
+
def update_kalman_filter(kf, bbox):
|
93 |
+
kf.update(bbox.reshape((4, 1)))
|
94 |
+
return kf
|
95 |
+
|
96 |
+
def match_and_identify(features, bbox):
|
97 |
+
global next_person_id
|
98 |
+
|
99 |
+
# Flag to check if a match is found
|
100 |
+
matched = False
|
101 |
+
|
102 |
+
# Iterate over existing identities to check for matches
|
103 |
+
for idx, (feat, identity) in enumerate(zip(person_features, person_identities)):
|
104 |
+
# Compute cosine similarity between features
|
105 |
+
similarity = cosine_similarity(
|
106 |
+
np.array(feat).reshape(1, -1),
|
107 |
+
np.array(features).reshape(1, -1)
|
108 |
+
)[0][0]
|
109 |
+
|
110 |
+
# If similarity is above threshold, consider them as the same person
|
111 |
+
similarity_threshold = 0.7 # Adjust as needed
|
112 |
+
if similarity > similarity_threshold:
|
113 |
+
# Assign color if not already assigned
|
114 |
+
if identity in person_colors:
|
115 |
+
color = person_colors[identity]
|
116 |
+
else:
|
117 |
+
color = get_color(identity)
|
118 |
+
person_colors[identity] = color
|
119 |
+
|
120 |
+
# Update Kalman filter
|
121 |
+
kalman_filters[identity] = update_kalman_filter(kalman_filters[identity], bbox)
|
122 |
+
|
123 |
+
# Set matched flag to True
|
124 |
+
matched = True
|
125 |
+
return identity, color
|
126 |
+
|
127 |
+
# If no match found, add new identity
|
128 |
+
if not matched:
|
129 |
+
person_features.append(features)
|
130 |
+
person_identities.append(next_person_id)
|
131 |
+
color = get_color(next_person_id)
|
132 |
+
person_colors[next_person_id] = color
|
133 |
+
|
134 |
+
# Initialize Kalman filter
|
135 |
+
kalman_filters[next_person_id] = initialize_kalman_filter(bbox)
|
136 |
+
|
137 |
+
identity = next_person_id
|
138 |
+
next_person_id += 1
|
139 |
+
|
140 |
+
return identity, color
|
141 |
+
|
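# Design note: matching is greedy (the first identity above the threshold
# wins) and stored features are never refreshed, so identities can drift on
# long sequences; a fuller re-identification scheme would pick the
# best-scoring identity and update its appearance model over time.
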
def process_image(image):
    # Work on a copy so the caller's image is left untouched
    image_np = np.array(image)
    input_tensor = np.expand_dims(image_np, axis=0)

    # Run inference
    detections = detect_objects(input_tensor)

    # Extract output tensors and convert to numpy arrays
    boxes = detections[0].numpy()[0]
    scores = detections[1].numpy()[0]
    classes = detections[2].numpy()[0]
    num_detections = int(detections[3].numpy()[0])

    # Keep only confident detections of the 'person' class (COCO class 1)
    threshold = 0.3  # Adjust this threshold as needed
    h, w, _ = image_np.shape
    for i in range(num_detections):
        class_id = int(classes[i])
        score = scores[i]
        box = boxes[i]

        if class_id == 1 and score > threshold:
            # Boxes arrive normalized as [ymin, xmin, ymax, xmax]
            ymin, xmin, ymax, xmax = box
            left, right, top, bottom = int(xmin * w), int(xmax * w), int(ymin * h), int(ymax * h)

            # Extract the person ROI, skipping degenerate boxes
            person_roi = image_np[top:bottom, left:right]
            if person_roi.size == 0:
                continue

            # Extract appearance features
            features = extract_features(person_roi)

            # The raw normalized detection box (for a video stream,
            # predict_bbox could smooth this across frames)
            detected_bbox = np.array([xmin, ymin, xmax, ymax])

            # Match and identify
            identity, color = match_and_identify(features, detected_bbox)

            # Draw the bounding box and identity label in pixel coordinates
            cv2.rectangle(image_np, (left, top), (right, bottom), color, 2)
            cv2.putText(image_np, f'Person {identity}', (left, top - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    return image_np

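# Example of driving the pipeline without the UI (hypothetical file names;
# note OpenCV loads BGR while the model is fed RGB):
#
#   img = cv2.cvtColor(cv2.imread("people.jpg"), cv2.COLOR_BGR2RGB)
#   annotated = process_image(img)
#   cv2.imwrite("out.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))
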
def gradio_interface(input_image):
    # Process the input image and return the annotated result
    return process_image(input_image)

# Create the Gradio interface
iface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Image(),
    outputs=gr.Image(),
    title="Person Detection and Tracking",
    description="Upload an image to detect and track persons."
)

# Launch the interface
iface.launch()
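
# Run locally with `python app.py`; Gradio serves the UI on its default
# http://127.0.0.1:7860. On Hugging Face Spaces the platform imports app.py
# and hosts the interface automatically.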