import gradio as gr
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
from PIL import Image
import cv2
import numpy as np

# Download the face-detection model weights from the Hugging Face Hub
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
model = YOLO(model_path)


def process_video(video_path):
    # Open the video file
    cap = cv2.VideoCapture(video_path)
    unique_faces = set()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Convert the BGR frame to an RGB PIL Image
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(frame)

        # Detect faces in the frame; ultralytics YOLO returns a list of Results objects
        results = model(pil_image)
        boxes = results[0].boxes

        # Add each detection's bounding-box coordinates to the set.
        # Note: this only deduplicates identical boxes, so the result is an
        # approximation of the number of distinct faces seen in the video.
        for box in boxes:
            face_data = tuple(box.xyxy[0].tolist())
            unique_faces.add(face_data)

    cap.release()
    return len(unique_faces)


# Gradio interface
iface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload a Video"),
    outputs="number",
    title="Unique Face Counter in Video",
)

if __name__ == "__main__":
    iface.launch()