import gradio as gr
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
from supervision import Detections
from PIL import Image
import numpy as np
import cv2

# Download and load the model
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
model = YOLO(model_path)

# Define prediction function
def detect_faces(image):
    # Run inference
    output = model(image)
    detections = Detections.from_ultralytics(output[0])
    num_faces = len(detections.xyxy)

    # Convert PIL image to OpenCV format
    image_np = np.array(image)
    image_cv = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)

    # Draw bounding boxes
    for box in detections.xyxy:
        x1, y1, x2, y2 = map(int, box)
        cv2.rectangle(image_cv, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # Convert back to PIL image
    result_image = Image.fromarray(cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB))
    return result_image, num_faces

# Gradio Interface
demo = gr.Interface(
    fn=detect_faces,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[
        gr.Image(type="pil", label="Detected Faces"),
        gr.Number(label="Face Count")
    ],
    title="Face Detection with YOLOv8",
    description="Drag and drop an image or click to upload. The model will detect faces using YOLOv8.",
    live=False
)

demo.launch()