hb-setosys committed
Commit b4a375f · verified · 1 parent: 0e46ad2

Create app.py

Files changed (1): app.py (+112 −0)
app.py ADDED
@@ -0,0 +1,112 @@
import gradio as gr
import cv2
import numpy as np
import os

# Load the YOLOv3 model (weights and config must be in the working directory)
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')

# Run inference on the CPU via the OpenCV backend
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

# Load the COCO class names
with open('coco.names', 'r') as f:
    classes = [line.strip() for line in f]

# Get the YOLO output layer names
output_layers_names = net.getUnconnectedOutLayersNames()

def count_people_in_frame(frame):
    """
    Detect people in a single BGR frame; return the annotated frame
    and the person count.
    """
    height, width, _ = frame.shape

    # Convert the frame to a YOLO input blob (416x416, scaled to [0, 1], BGR -> RGB)
    blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)

    # Forward pass
    layer_outputs = net.forward(output_layers_names)

    # Collect bounding boxes for detections of the 'person' class
    boxes, confidences = [], []
    for output in layer_outputs:
        for detection in output:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]

            if classes[class_id] == 'person' and confidence > 0.5:
                center_x, center_y = int(detection[0] * width), int(detection[1] * height)
                w, h = int(detection[2] * width), int(detection[3] * height)
                x, y = int(center_x - w / 2), int(center_y - h / 2)

                boxes.append([x, y, w, h])
                confidences.append(float(confidence))

    # Apply Non-Maximum Suppression (NMS); flatten handles both the (N, 1)
    # and the 1-D return shapes seen across OpenCV versions
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4) if boxes else []
    indexes = np.array(indexes).flatten() if len(indexes) > 0 else []

    # Draw the surviving bounding boxes on the frame
    for i in indexes:
        x, y, w, h = boxes[i]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Return processed frame and number of people detected
    return frame, len(indexes)

def count_people_video(video_path):
    """
    Process a video frame by frame and count people in each frame.
    """
    if not video_path or not os.path.exists(video_path):
        return "Error: Video file not found."

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    people_per_frame = []

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Count people in the frame
        _, people_count = count_people_in_frame(frame)
        people_per_frame.append(people_count)

    cap.release()

    # Report the peak per-frame count as the number of people in the video
    return {
        "People in Video": int(np.max(people_per_frame)) if people_per_frame else 0,
    }

def analyze_video(video_file):
    result = count_people_video(video_file)
    if isinstance(result, str):  # propagate error messages as-is
        return result
    return "\n".join(f"{key}: {value}" for key, value in result.items())

def analyze_image(image):
    # Gradio delivers a PIL image in RGB; OpenCV expects BGR
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    processed_image, people_count = count_people_in_frame(image_cv)
    # Convert back to RGB for display in the Gradio image component
    processed_image = cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB)
    return processed_image, f"People in Image: {people_count}"

# Gradio interface: gr.Interface takes a single function, not a list,
# so image and video analysis each get their own interface, combined
# into one app with gr.TabbedInterface
image_interface = gr.Interface(
    fn=analyze_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[gr.Image(label="Processed Image"), gr.Textbox(label="People Counting Results")],
    title="YOLO-based People Counter",
    description="Upload an image to detect and count people using YOLOv3."
)

video_interface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="People Counting Results"),
    title="YOLO-based People Counter",
    description="Upload a video to detect and count people using YOLOv3."
)

interface = gr.TabbedInterface([image_interface, video_interface], ["Image", "Video"])

# Launch app
if __name__ == "__main__":
    interface.launch()
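
Note: app.py assumes that yolov3.weights, yolov3.cfg, and coco.names already exist in the working directory; the commit itself does not add them. A minimal fetch script is sketched below, assuming the commonly used Darknet download locations (these URLs are not part of this commit and should be verified before use):

# fetch_model.py - hypothetical helper, not part of this commit.
# Downloads the three files app.py expects into the working directory.
import urllib.request

FILES = {
    # Assumed URLs: the standard Darknet weight mirror and the darknet GitHub repo
    'yolov3.weights': 'https://pjreddie.com/media/files/yolov3.weights',
    'yolov3.cfg': 'https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg',
    'coco.names': 'https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names',
}

for filename, url in FILES.items():
    print(f'Downloading {filename} ...')
    urllib.request.urlretrieve(url, filename)  # saves next to app.py
print('Done.')

The app additionally needs its Python dependencies installed, e.g. pip install gradio opencv-python numpy.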