Ashu1803 committed on
Commit
222e5a4
1 Parent(s): 14a7eac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -2
app.py CHANGED
@@ -32,7 +32,7 @@ outputs = ["detection_boxes:0", "detection_scores:0", "detection_classes:0", "nu
32
  detection_fn = wrap_frozen_graph(graph_def, inputs, outputs)
33
 
34
  # TensorFlow function for detection
35
- @tf.function(input_signature=[tf.TensorSpec(shape=[None, None, None, 3], dtype=tf.uint8)])
36
  def detect_objects(image):
37
  return detection_fn(image)
38
 
@@ -139,8 +139,20 @@ def match_and_identify(features, bbox):
139
 
140
  return identity, color
141
 
 
 
 
 
 
 
 
 
 
 
 
142
  def process_image(image):
143
  if image is None:
 
144
  return None
145
 
146
  # Convert image to RGB if it's not
@@ -167,6 +179,8 @@ def process_image(image):
167
  classes = detections[2].numpy()[0]
168
  num_detections = int(detections[3].numpy()[0])
169
 
 
 
170
  # Filter detections for 'person' class
171
  threshold = 0.3 # Adjust this threshold as needed
172
  for i in range(num_detections):
@@ -196,13 +210,17 @@ def process_image(image):
196
  cv2.rectangle(image, (left, top), (right, bottom), color, 2)
197
  cv2.putText(image, f'Person {identity}', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
198
 
 
 
199
  except Exception as e:
200
  print(f"Error during processing: {str(e)}")
201
  return image # Return original image if there's an error
202
 
203
  return image
 
204
  def gradio_interface(input_image):
205
  if input_image is None:
 
206
  return None
207
 
208
  # Convert PIL Image to numpy array if necessary
@@ -211,6 +229,18 @@ def gradio_interface(input_image):
211
 
212
  # Process the input image
213
  output_image = process_image(input_image)
 
 
 
 
 
 
 
 
 
 
 
 
214
  return output_image
215
 
216
  # Create Gradio interface
@@ -219,7 +249,7 @@ iface = gr.Interface(
219
  inputs=gr.Image(),
220
  outputs=gr.Image(),
221
  title="Person Detection and Tracking",
222
- description="Upload an image to detect and track persons."
223
  )
224
 
225
  # Launch the interface
 
32
  detection_fn = wrap_frozen_graph(graph_def, inputs, outputs)
33
 
34
  # TensorFlow function for detection
35
+ @tf.function(input_signature=[tf.TensorSpec(shape=[1, None, None, 3], dtype=tf.uint8)])
36
  def detect_objects(image):
37
  return detection_fn(image)
38
 
 
139
 
140
  return identity, color
141
 
142
+ import numpy as np
143
+ import cv2
144
+ import tensorflow as tf
145
+ from tensorflow.keras.applications import ResNet50
146
+ from tensorflow.keras.applications.resnet50 import preprocess_input
147
+ from sklearn.metrics.pairwise import cosine_similarity
148
+ from filterpy.kalman import KalmanFilter
149
+ import gradio as gr
150
+
151
+ # ... (previous code remains the same) ...
152
+
153
  def process_image(image):
154
  if image is None:
155
+ print("Input image is None")
156
  return None
157
 
158
  # Convert image to RGB if it's not
 
179
  classes = detections[2].numpy()[0]
180
  num_detections = int(detections[3].numpy()[0])
181
 
182
+ print(f"Number of detections: {num_detections}")
183
+
184
  # Filter detections for 'person' class
185
  threshold = 0.3 # Adjust this threshold as needed
186
  for i in range(num_detections):
 
210
  cv2.rectangle(image, (left, top), (right, bottom), color, 2)
211
  cv2.putText(image, f'Person {identity}', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
212
 
213
+ print(f"Detected person {identity} at ({left}, {top}, {right}, {bottom})")
214
+
215
  except Exception as e:
216
  print(f"Error during processing: {str(e)}")
217
  return image # Return original image if there's an error
218
 
219
  return image
220
+
221
  def gradio_interface(input_image):
222
  if input_image is None:
223
+ print("Input image is None")
224
  return None
225
 
226
  # Convert PIL Image to numpy array if necessary
 
229
 
230
  # Process the input image
231
  output_image = process_image(input_image)
232
+
233
+ if output_image is None:
234
+ print("Output image is None")
235
+ return None
236
+
237
+ print(f"Output image shape: {output_image.shape}")
238
+ print(f"Output image dtype: {output_image.dtype}")
239
+
240
+ # Ensure the output is in the correct format for Gradio
241
+ if output_image.dtype != np.uint8:
242
+ output_image = (output_image * 255).astype(np.uint8)
243
+
244
  return output_image
245
 
246
  # Create Gradio interface
 
249
  inputs=gr.Image(),
250
  outputs=gr.Image(),
251
  title="Person Detection and Tracking",
252
+ description="Upload an image to detect and track persons.",
253
  )
254
 
255
  # Launch the interface