#!/usr/bin/env python3
"""
YOLO Object Detection with Gradio Interface
Optimized for Hugging Face Spaces deployment
"""

import gradio as gr
import cv2
import numpy as np
from ultralytics import YOLO
from PIL import Image
import torch
import spaces
import os
import tempfile

# Cache of loaded models, keyed by size
models = {}
current_model_size = 'nano'


def load_model(model_size='nano'):
    """
    Load a YOLO model based on the selected size.
    """
    global models, current_model_size

    model_names = {
        'nano': 'yolov8n.pt',
        'small': 'yolov8s.pt',
        'medium': 'yolov8m.pt',
        'large': 'yolov8l.pt',
        'xlarge': 'yolov8x.pt'
    }

    model_name = model_names.get(model_size, 'yolov8n.pt')

    # Load only if this size has not been cached yet
    if model_size not in models:
        print(f"Loading {model_name}...")
        models[model_size] = YOLO(model_name)
        current_model_size = model_size
        # Report whether CUDA is available
        if torch.cuda.is_available():
            return f"✅ Model {model_name} loaded successfully! (GPU enabled)"
        else:
            return f"✅ Model {model_name} loaded successfully! (CPU mode)"
    else:
        current_model_size = model_size
        return f"✅ Model {model_name} already loaded!"


# Use the @spaces.GPU decorator for GPU functions on Hugging Face Spaces
@spaces.GPU(duration=60)
def detect_image(input_image, model_size, conf_threshold=0.25, iou_threshold=0.45):
    """
    Perform object detection on a single image.
    """
    if model_size not in models:
        load_model(model_size)

    model = models[model_size]

    if input_image is None:
        return None, "No image provided"

    # Convert PIL Image to numpy array if necessary
    if isinstance(input_image, Image.Image):
        input_image = np.array(input_image)

    # Run inference
    results = model(input_image, conf=conf_threshold, iou=iou_threshold)

    # plot() returns a BGR array (OpenCV convention); convert to RGB for display
    annotated_image = cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)

    # Collect detection details
    detections = []
    for r in results:
        if r.boxes is not None:
            for box in r.boxes:
                if box.cls is not None:
                    class_id = int(box.cls)
                    class_name = model.names[class_id]
                    confidence = float(box.conf)
                    bbox = box.xyxy[0].tolist()
                    detections.append({
                        'class': class_name,
                        'confidence': f"{confidence:.2%}",
                        'bbox': [int(x) for x in bbox]
                    })

    # Create detection summary
    summary = f"Found {len(detections)} object(s)\n\n"
    if detections:
        # Count occurrences of each class
        class_counts = {}
        for det in detections:
            class_counts[det['class']] = class_counts.get(det['class'], 0) + 1

        summary += "Summary by class:\n"
        for class_name, count in class_counts.items():
            summary += f"  • {class_name}: {count}\n"

        summary += "\nDetailed detections:\n"
        for i, det in enumerate(detections, 1):
            summary += f"{i}. {det['class']} ({det['confidence']})\n"

    return annotated_image, summary
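
# Hedged usage sketch (not part of the app flow): calling detect_image directly,
# assuming a local test file "sample.jpg" exists. Outside Hugging Face Spaces the
# @spaces.GPU decorator should behave as a pass-through, so this ought to run on
# a plain machine as well:
#
#     from PIL import Image
#     annotated, report = detect_image(Image.open("sample.jpg"), "nano", conf_threshold=0.5)
#     Image.fromarray(annotated).save("annotated.jpg")
#     print(report)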

@spaces.GPU(duration=120)
def detect_video(input_video, model_size, conf_threshold=0.25, iou_threshold=0.45,
                 max_frames=300, progress=gr.Progress()):
    """
    Perform object detection on a video.

    Note: progress is declared as a default parameter so Gradio injects a live
    tracker; instantiating gr.Progress() inside the body would not update the UI.
    """
    if model_size not in models:
        load_model(model_size)

    model = models[model_size]

    if input_video is None:
        return None, "No video provided"

    # Open the video
    cap = cv2.VideoCapture(input_video)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps == 0:
        fps = 25  # Default fallback FPS
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Cap the number of processed frames to keep Spaces runs short
    if max_frames and total_frames > max_frames:
        total_frames = max_frames

    # Create a temporary output file
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
        output_path = tmp_file.name

    # Set up the video writer. 'mp4v' is widely writable but often not playable
    # in browsers; switching to 'avc1' (H.264) may be needed for in-browser playback.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    frame_count = 0
    detected_objects = set()

    # Process the video frame by frame
    while cap.isOpened() and frame_count < total_frames:
        ret, frame = cap.read()
        if not ret:
            break

        # Run detection
        results = model(frame, conf=conf_threshold, iou=iou_threshold)

        # Collect detected classes
        for r in results:
            if r.boxes is not None:
                for box in r.boxes:
                    if box.cls is not None:
                        class_id = int(box.cls)
                        detected_objects.add(model.names[class_id])

        # plot() returns BGR, which is what cv2.VideoWriter expects
        annotated_frame = results[0].plot()
        out.write(annotated_frame)

        frame_count += 1

        # Update progress every 10 frames
        if frame_count % 10 == 0:
            progress(frame_count / total_frames,
                     desc=f"Processing frame {frame_count}/{total_frames}")

    # Clean up
    cap.release()
    out.release()

    # Create the summary
    summary = f"Processed {frame_count} frames\n"
    summary += (f"Detected objects: {', '.join(sorted(detected_objects))}"
                if detected_objects else "No objects detected")

    return output_path, summary
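
# Hedged usage sketch (not part of the app flow): running detect_video on a local
# clip, assuming "clip.mp4" exists. The progress tracker is only meaningful inside
# a Gradio event; for a plain script run the return values are what matter:
#
#     out_path, report = detect_video("clip.mp4", "small", max_frames=100)
#     print(report, "->", out_path)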

# Build the Gradio interface
def create_interface():
    with gr.Blocks(
        title="YOLO Object Detection",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container { max-width: 1200px !important; }
        #title { text-align: center; margin-bottom: 1rem; }
        """
    ) as demo:
        gr.Markdown(
            """
            # YOLO Object Detection

            Powered by Ultralytics YOLOv8 - state-of-the-art object detection in your browser!
            [Duplicate this Space](https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME?duplicate=true) | [Ultralytics on GitHub](https://github.com/ultralytics/ultralytics) | [License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE)
            """,
            elem_id="title"
        )
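
        # --- The source file is truncated at this point. ---
        # What follows is a minimal, hedged sketch of how the functions above
        # might be wired into the UI; component names, layout, and the launch
        # block are assumptions, not recovered original code.
        with gr.Tabs():
            with gr.Tab("Image"):
                with gr.Row():
                    with gr.Column():
                        image_input = gr.Image(type="pil", label="Input Image")
                        image_model = gr.Dropdown(
                            choices=['nano', 'small', 'medium', 'large', 'xlarge'],
                            value='nano', label="Model Size"
                        )
                        image_conf = gr.Slider(0.0, 1.0, value=0.25, step=0.05,
                                               label="Confidence Threshold")
                        image_iou = gr.Slider(0.0, 1.0, value=0.45, step=0.05,
                                              label="IoU Threshold")
                        image_button = gr.Button("Detect Objects")
                    with gr.Column():
                        image_output = gr.Image(label="Annotated Image")
                        image_summary = gr.Textbox(label="Detections", lines=12)
                image_button.click(
                    detect_image,
                    inputs=[image_input, image_model, image_conf, image_iou],
                    outputs=[image_output, image_summary],
                )
            with gr.Tab("Video"):
                with gr.Row():
                    with gr.Column():
                        video_input = gr.Video(label="Input Video")
                        video_model = gr.Dropdown(
                            choices=['nano', 'small', 'medium', 'large', 'xlarge'],
                            value='nano', label="Model Size"
                        )
                        video_conf = gr.Slider(0.0, 1.0, value=0.25, step=0.05,
                                               label="Confidence Threshold")
                        video_iou = gr.Slider(0.0, 1.0, value=0.45, step=0.05,
                                              label="IoU Threshold")
                        video_button = gr.Button("Detect Objects")
                    with gr.Column():
                        video_output = gr.Video(label="Annotated Video")
                        video_summary = gr.Textbox(label="Summary", lines=4)
                video_button.click(
                    detect_video,
                    inputs=[video_input, video_model, video_conf, video_iou],
                    outputs=[video_output, video_summary],
                )

    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch()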