hb-setosys committed on
Commit
c3b9d4f
·
verified ·
1 Parent(s): 43e0c53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -17
app.py CHANGED
@@ -49,13 +49,7 @@ def count_people_in_frame(frame):
49
  # Apply Non-Maximum Suppression (NMS)
50
  indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4) if boxes else []
51
 
52
- # Draw bounding boxes on the image
53
- for i in indexes:
54
- x, y, w, h = boxes[i]
55
- cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
56
-
57
- # Return processed frame and number of people detected
58
- return frame, len(indexes)
59
 
60
  def count_people_video(video_path):
61
  """
@@ -77,7 +71,7 @@ def count_people_video(video_path):
77
  break
78
 
79
  # Count people in the frame
80
- _, people_count = count_people_in_frame(frame)
81
  people_per_frame.append(people_count)
82
 
83
  frame_count += 1
@@ -85,18 +79,23 @@ def count_people_video(video_path):
85
  cap.release()
86
 
87
  # Generate analytics
88
- return {
89
- "People in Video": int(np.max(people_per_frame)) if people_per_frame else 0,
90
- }
91
 
92
  def analyze_video(video_file):
93
- result = count_people_video(video_file)
94
- return "\n".join([f"{key}: {value}" for key, value in result.items()])
 
 
 
 
 
 
 
95
 
96
  def analyze_image(image):
97
  image_cv = np.array(image) # Convert PIL image to NumPy array
98
- processed_image, people_count = count_people_in_frame(image_cv)
99
- return processed_image, f"People in Image: {people_count}"
100
 
101
  # Gradio Interface for Image Processing
102
  image_interface = gr.Interface(
@@ -110,7 +109,7 @@ image_interface = gr.Interface(
110
  # Gradio Interface for Video Processing
111
  video_interface = gr.Interface(
112
  fn=analyze_video,
113
- inputs=gr.Video(label="Upload Video"),
114
  outputs=gr.Textbox(label="People Counting Results"),
115
  title="YOLO People Counter (Video)",
116
  description="Upload a video to detect and count people using YOLOv3."
@@ -119,7 +118,7 @@ video_interface = gr.Interface(
119
  # Combine both interfaces into tabs
120
  app = gr.TabbedInterface(
121
  [image_interface, video_interface],
122
- tab_names=["Image Mode", "Video Mode"] # Explicitly define tab names
123
  )
124
 
125
  # Launch app
 
49
  # Apply Non-Maximum Suppression (NMS)
50
  indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4) if boxes else []
51
 
52
+ return len(indexes)
 
 
 
 
 
 
53
 
54
  def count_people_video(video_path):
55
  """
 
71
  break
72
 
73
  # Count people in the frame
74
+ people_count = count_people_in_frame(frame)
75
  people_per_frame.append(people_count)
76
 
77
  frame_count += 1
 
79
  cap.release()
80
 
81
  # Generate analytics
82
+ return f"Max People Detected in Video: {max(people_per_frame) if people_per_frame else 0}"
 
 
83
 
84
def analyze_video(video_file):
    """Count people in an uploaded video and return a one-line summary.

    Parameters:
        video_file: Either a filesystem path (str), a Gradio upload object
            exposing a ``.name`` path attribute, or ``None`` when the user
            submitted without choosing a file.

    Returns:
        str: The summary produced by ``count_people_video``, or an error
        message when no usable video file is available.
    """
    # Gradio passes None when the form is submitted with no file selected;
    # without this guard, `video_file.name` below raises AttributeError.
    if video_file is None:
        return "Error: Video file could not be loaded."

    # Extract video path from uploaded file (plain path or tempfile-like object)
    video_path = video_file if isinstance(video_file, str) else video_file.name

    # Ensure path exists
    if not os.path.exists(video_path):
        return "Error: Video file could not be loaded."

    return count_people_video(video_path)
94
 
95
def analyze_image(image):
    """Detect people in a single image and report the count.

    Parameters:
        image: A PIL image as delivered by the Gradio image input.

    Returns:
        tuple: The original image unchanged, and a caption string with the
        number of people detected.
    """
    # The detector works on NumPy arrays, so convert the PIL image first.
    frame = np.array(image)
    detected = count_people_in_frame(frame)
    caption = f"People in Image: {detected}"
    return image, caption
99
 
100
  # Gradio Interface for Image Processing
101
  image_interface = gr.Interface(
 
109
  # Gradio Interface for Video Processing
110
  video_interface = gr.Interface(
111
  fn=analyze_video,
112
+ inputs=gr.Video(type="file", label="Upload Video"), # Ensure video is treated as a file
113
  outputs=gr.Textbox(label="People Counting Results"),
114
  title="YOLO People Counter (Video)",
115
  description="Upload a video to detect and count people using YOLOv3."
 
118
  # Combine both interfaces into tabs
119
  app = gr.TabbedInterface(
120
  [image_interface, video_interface],
121
+ tab_names=["Image Mode", "Video Mode"]
122
  )
123
 
124
  # Launch app