richardsl committed
Commit 6082c89
1 Parent(s): 0bb2064

Update app.py

Files changed (1)
  1. app.py +19 -3
app.py CHANGED
@@ -1,3 +1,4 @@
+
import gradio as gr
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
@@ -5,43 +6,58 @@ from PIL import Image
import cv2
import numpy as np

+
# Download the YOLOv8 model for face detection
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
model = YOLO(model_path)

+
def process_video(video_path):
    # Open the video file
    cap = cv2.VideoCapture(video_path)
    unique_faces = set()

+
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

+
        # Convert the frame to PIL Image
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(frame)

+
        # Detect faces in the frame
        output = model(pil_image)
        faces = output.pred[0]

+
        # Iterate over detected faces and add them to the set
        for face in faces:
            face_data = tuple(face.numpy())
            unique_faces.add(face_data)

+
    cap.release()

+
    return len(unique_faces)
# Gradio interface
iface = gr.Interface(
    fn=process_video,
    inputs=gr.inputs.Video(source="upload", label="Upload a Video"),
    outputs="number",
-     title="Unique Face Counter in Video",
-     description="Upload a video to count the total number of unique faces."
+     title="Unique Face Counter in Video",# Create the Gradio interface
+
+
+     inputs=gr.Video(label="Upload Video"),
+     outputs=gr.Textbox(label="Number of People Detected"),
+     title="People Counter",
+     description="Upload a video to count the number of people present."
)

- iface.launch()
+
+ if __name__ == "__main__":
+     iface.launch()
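
Note: as committed, the gr.Interface(...) call ends up repeating the inputs, outputs, and title keyword arguments, which Python rejects at parse time (keyword argument repeated), and output.pred[0] does not appear to match the results object that an ultralytics YOLO model returns, which exposes detections via results[0].boxes. The sketch below is one way the app could be assembled with the current ultralytics and Gradio APIs; the gr.Video/gr.Number components, the boxes.xyxy iteration, and the box-tuple deduplication are illustrative assumptions, not part of this commit.

```python
import cv2
import gradio as gr
from huggingface_hub import hf_hub_download
from PIL import Image
from ultralytics import YOLO

# Download the YOLOv8 face-detection weights once at startup
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
model = YOLO(model_path)


def process_video(video_path):
    # Open the uploaded video and collect one tuple per detected box
    cap = cv2.VideoCapture(video_path)
    unique_faces = set()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # OpenCV decodes frames as BGR; convert to RGB before inference
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(rgb)

        # model() returns a list of Results; detections live on results[0].boxes
        results = model(pil_image)
        for box in results[0].boxes.xyxy:
            # Raw coordinates only approximate uniqueness: the same face in two
            # frames usually yields two slightly different boxes.
            unique_faces.add(tuple(box.tolist()))

    cap.release()
    return len(unique_faces)


# Each keyword argument appears exactly once
iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload a Video"),
    outputs=gr.Number(label="Unique Face Count"),
    title="Unique Face Counter in Video",
    description="Upload a video to count the total number of unique faces.",
)

if __name__ == "__main__":
    iface.launch()
```

Even with those fixes, a set of raw box coordinates deduplicates very little, since the same face rarely lands on identical coordinates in two frames, so the reported count tends toward the total number of detections; counting unique people would need some form of tracking across frames rather than independent per-frame detection.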