import cv2
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
from supervision import Detections
from PIL import Image
import torch
import numpy as np
import gradio as gr
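
# The imports above map to these PyPI packages (assumed standard distribution
# names; pin versions to match your environment):
#   opencv-python, huggingface-hub, ultralytics, supervision, pillow, torch,
#   numpy, gradio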


def draw_rect_with_conf(image, detections):
    # Draw a green bounding box and the detection confidence above each face.
    for detection in detections:
        # Iterating over a supervision Detections object yields per-detection
        # tuples; index the fields rather than unpacking a fixed count, since
        # the tuple length differs across supervision versions.
        box, conf = detection[0], detection[2]
        x1, y1, x2, y2 = box.astype(int)

        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(image, f'Confidence: {conf:.2f}', (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    return image


def detect_faces(input_img):
    # Fetch the face-detection weights from the Hugging Face Hub;
    # hf_hub_download caches the file, so later calls reuse the local copy.
    model_path = hf_hub_download(
        repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt"
    )
    model = YOLO(model_path)

    # Keep a BGR copy for OpenCV drawing; inference runs on the PIL image itself.
    image_cv = cv2.cvtColor(np.array(input_img), cv2.COLOR_RGB2BGR)

    output = model(input_img)
    results = Detections.from_ultralytics(output[0])

    drawn_image = draw_rect_with_conf(image_cv, results)

    # Convert BGR back to RGB so the annotated image displays correctly as PIL.
    drawn_image_pil = Image.fromarray(cv2.cvtColor(drawn_image, cv2.COLOR_BGR2RGB))
    return drawn_image_pil
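
# Note: detect_faces() rebuilds the YOLO model on every request (the download
# itself is cached). A common variant (a sketch, not what this script does)
# loads the model once at import time and reuses it:
#
#     _model = YOLO(hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection",
#                                   filename="model.pt"))
#
# detect_faces() would then call _model(input_img) instead of constructing YOLO.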


def gradio_interface(input_img):
    # Thin wrapper so gr.Interface gets a single-argument callable.
    detected_img = detect_faces(input_img)
    return detected_img


# PIL image in, annotated image out.
demo = gr.Interface(fn=gradio_interface, inputs=gr.Image(type="pil"), outputs="image")


if __name__ == "__main__":
    demo.launch()
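    # demo.launch(share=True) would additionally expose a temporary public URL
    # (share is a standard launch() flag), if the demo needs to be shared.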