richardsl committed on
Commit 55d8422
1 Parent(s): 5b449c2

Update app.py

Files changed (1)
  1. app.py +26 -16
app.py CHANGED
@@ -1,41 +1,51 @@
 import gradio as gr
 import cv2
-import numpy as np
-# Assuming YOLO and SORT are available as Python packages or modules
-from yolov3 import YOLO  # This is hypothetical; actual import will depend on your YOLO version
+from PIL import Image
+from yolov3 import YOLO  # Update this import based on your YOLO implementation
 from sort import Sort
 
 # Initialize YOLO and SORT
-yolo = YOLO()  # Hypothetical initialization; actual initialization will depend on YOLO implementation
-tracker = Sort()
-
-def process_frame(frame):
-    # Your code to process the frame, detect people, and return bounding boxes
-    pass
+yolo = YOLO()
+tracker = Sort()
+
+def detect_people(frame):
+    try:
+        # Add YOLO detection logic here
+        # Return the detected bounding boxes
+        return []
+    except Exception as e:
+        print(f"Detection Error: {e}")
+        return []
+
+def track_people(detections):
+    try:
+        # Update SORT tracker with the detections
+        return tracker.update(detections)
+    except Exception as e:
+        print(f"Tracking Error: {e}")
+        return []
 
 def count_people(video_file):
     cap = cv2.VideoCapture(video_file)
-    unique_people = {}  # Dictionary to store unique people images
+    unique_people = {}
 
     while cap.isOpened():
         ret, frame = cap.read()
         if not ret:
             break
 
-        detections = process_frame(frame)  # Get detections from YOLO
-        tracked_objects = tracker.update(detections)  # Update SORT tracker
+        detections = detect_people(frame)
+        tracked_objects = track_people(detections)
 
         for obj in tracked_objects:
             obj_id = int(obj[4])
             if obj_id not in unique_people:
-                # Crop and store the image of the person
-                x1, y1, x2, y2 = int(obj[0]), int(obj[1]), int(obj[2]), int(obj[3])
+                x1, y1, x2, y2 = map(int, obj[:4])
                 person_img = frame[y1:y2, x1:x2]
                 unique_people[obj_id] = person_img
 
     cap.release()
 
-    # Convert images to a format suitable for Gradio output
    output_images = [Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) for img in unique_people.values()]
     return len(unique_people), output_images
 
@@ -48,4 +58,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
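
Note that the committed detect_people is still a stub that returns an empty list, so the tracker never receives any boxes. A minimal sketch of how it could be filled in, assuming the ultralytics package is swapped in for the hypothetical yolov3 import (the yolov8n.pt weights file and the [x1, y1, x2, y2, score] row format are assumptions, not part of this commit):

# Sketch only: assumes `pip install ultralytics` replaces the hypothetical
# `from yolov3 import YOLO` import used in app.py.
import numpy as np
from ultralytics import YOLO

yolo = YOLO("yolov8n.pt")  # assumed detection checkpoint

def detect_people(frame):
    try:
        result = yolo(frame, verbose=False)[0]
        boxes = []
        for box in result.boxes:
            if int(box.cls) == 0:  # COCO class 0 is "person"
                x1, y1, x2, y2 = box.xyxy[0].tolist()
                boxes.append([x1, y1, x2, y2, float(box.conf)])
        # SORT expects an (N, 5) array; return an empty (0, 5) array when nothing is found
        return np.array(boxes) if boxes else np.empty((0, 5))
    except Exception as e:
        print(f"Detection Error: {e}")
        return np.empty((0, 5))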
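For reference, if the sort import is the commonly used abewley/sort module (an assumption), tracker.update() takes an (N, 5) NumPy array of [x1, y1, x2, y2, score] rows rather than a plain Python list, and returns rows whose fifth element is the track ID, which is what obj[4] reads in count_people:

# Sketch only: assumes the reference abewley/sort implementation.
import numpy as np
from sort import Sort

tracker = Sort()
dets = np.array([[100, 50, 180, 220, 0.9]])  # one detection: x1, y1, x2, y2, score
tracks = tracker.update(dets)                # rows are [x1, y1, x2, y2, track_id]
for x1, y1, x2, y2, track_id in tracks:
    print(int(track_id), int(x1), int(y1), int(x2), int(y2))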
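The gr.Interface definition itself lies outside this diff (only the hunk header shows it). An illustrative sketch of wiring count_people to a video input and a count-plus-gallery output could look like the following; the component choices are assumptions, not the file's actual contents:

# Illustrative only: the real iface arguments are not shown in this commit.
# Assumes count_people from app.py is in scope.
import gradio as gr

iface = gr.Interface(
    fn=count_people,
    inputs=gr.Video(label="Input video"),
    outputs=[gr.Number(label="Unique people"), gr.Gallery(label="Detected people")],
    title="People Counter",
)

if __name__ == "__main__":
    iface.launch()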