# Hugging Face Spaces page chrome (not code) — preserved as a comment:
# Spaces: Sleeping
# Standard library
import base64
import io

# Third-party
import cv2
import gradio as gr
import numpy as np
from PIL import Image
from ultralytics import YOLO
# Load the locally stored fine-tuned weights once at import time so every
# request reuses the same model instance.
model = YOLO("best.pt")
def get_confidence_color(confidence):
    """Map a detection confidence to an RGB color tuple.

    The image being annotated is an RGB numpy array (it comes from PIL /
    Gradio's ``type="numpy"`` and is never converted to BGR), so the tuples
    must be in RGB order. The previous BGR-style values rendered "yellow"
    as cyan and "red" as blue on screen.

    Args:
        confidence: Detection confidence, expected in [0, 1].

    Returns:
        An ``(R, G, B)`` tuple: green above 0.7, yellow above 0.4,
        red otherwise.
    """
    if confidence > 0.7:
        return (0, 255, 0)    # green: high confidence
    elif confidence > 0.4:
        return (255, 255, 0)  # yellow: medium confidence
    else:
        return (255, 0, 0)    # red: low confidence
def draw_circle_with_count(image, box, count, confidence):
    """Draw a numbered circle at the center of *box* onto *image*.

    The circle outline and the centered count label share a color derived
    from the detection confidence. *image* is modified in place and also
    returned for convenient chaining.
    """
    left, top, right, bottom = map(int, box)
    center = (int((left + right) / 2), int((top + bottom) / 2))
    color = get_confidence_color(confidence)

    cv2.circle(image, center, 20, color, 2)

    label = str(count)
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = 0.7
    thickness = 2
    (label_w, label_h), _ = cv2.getTextSize(label, font, scale, thickness)
    # Offset so the text is centered on the circle's midpoint.
    origin = (center[0] - label_w // 2, center[1] + label_h // 2)
    cv2.putText(image, label, origin, font, scale, color, thickness)
    return image
def process_uploaded_image(image, imgsz, conf, iou):
    """Run YOLO inference on *image* and annotate detections for the UI.

    Args:
        image: Input image as a numpy array or PIL Image; ``None`` allowed.
        imgsz: Inference image size (int or numeric string from the dropdown).
        conf: Confidence threshold forwarded to the model.
        iou: IoU (NMS) threshold forwarded to the model.

    Returns:
        ``(annotated_image, summary_text, status_message)``; when no image
        was supplied, ``(None, None, "No image to process")``.
    """
    if image is None:
        return None, None, "No image to process"

    # Normalize to PIL for inference; Gradio hands us a numpy array.
    image_pil = Image.fromarray(image) if isinstance(image, np.ndarray) else image

    results = model(image_pil, imgsz=int(imgsz), conf=conf, iou=iou)

    # Draw on a numpy copy of the input.
    image_np = np.array(image_pil)
    detection_counts = {}
    total_count = 0
    for result in results:
        for box in result.boxes:
            total_count += 1
            class_name = model.names[int(box.cls[0])]
            confidence = float(box.conf[0])
            image_np = draw_circle_with_count(
                image_np, box.xyxy[0].tolist(), total_count, confidence
            )

            stats = detection_counts.setdefault(
                class_name, {'count': 0, 'low_conf': 0, 'med_conf': 0, 'high_conf': 0}
            )
            stats['count'] += 1
            if confidence > 0.7:
                stats['high_conf'] += 1
            elif confidence > 0.4:
                stats['med_conf'] += 1
            else:
                stats['low_conf'] += 1

    # Build the summary with a single join() instead of quadratic `+=`.
    lines = [f"Total Objects Detected: {total_count}", "", "Detections by class:"]
    for class_name, stats in detection_counts.items():
        lines += [
            "",
            f"{class_name}:",
            f"  Total: {stats['count']}",
            f"  High confidence (>70%): {stats['high_conf']} 🟢",
            f"  Medium confidence (40-70%): {stats['med_conf']} 🟡",
            f"  Low confidence (<40%): {stats['low_conf']} 🔴",
        ]
    count_text = "\n".join(lines) + "\n"

    return image_np, count_text, "Image processed successfully"
def process_api_image(image, imgsz, conf, iou):
    """Run YOLO inference for the API endpoint, returning box metadata.

    Args:
        image: Input image as a numpy array or PIL Image; ``None`` allowed.
        imgsz: Inference image size (int or numeric value).
        conf: Confidence threshold forwarded to the model.
        iou: IoU (NMS) threshold forwarded to the model.

    Returns:
        A 4-tuple ``(annotated_image, summary_text, status_message,
        bounding_boxes)``. The previous success path returned a 5th element
        (the raw ultralytics results), which mismatched both the
        ``None``-input branch (4 values) and the four Gradio outputs wired
        to this function, and was not JSON-serializable anyway; it has
        been dropped.
    """
    if image is None:
        return None, None, "No image to process", None

    # Normalize to PIL for inference.
    image_pil = Image.fromarray(image) if isinstance(image, np.ndarray) else image

    results = model(image_pil, imgsz=int(imgsz), conf=conf, iou=iou)

    image_np = np.array(image_pil)
    detection_counts = {}
    total_count = 0
    bounding_boxes = []
    for result in results:
        for box in result.boxes:
            total_count += 1
            class_name = model.names[int(box.cls[0])]
            confidence = float(box.conf[0])
            bbox = box.xyxy[0].tolist()
            image_np = draw_circle_with_count(image_np, bbox, total_count, confidence)

            # JSON-friendly record for the gr.JSON output.
            bounding_boxes.append({
                'box_id': total_count,
                'class_name': class_name,
                'confidence': confidence,
                'coordinates': {
                    'x1': int(bbox[0]),
                    'y1': int(bbox[1]),
                    'x2': int(bbox[2]),
                    'y2': int(bbox[3]),
                },
            })

            stats = detection_counts.setdefault(
                class_name, {'count': 0, 'low_conf': 0, 'med_conf': 0, 'high_conf': 0}
            )
            stats['count'] += 1
            if confidence > 0.7:
                stats['high_conf'] += 1
            elif confidence > 0.4:
                stats['med_conf'] += 1
            else:
                stats['low_conf'] += 1

    # Build the summary with a single join() instead of quadratic `+=`.
    lines = [f"Total Objects Detected: {total_count}", "", "Detections by class:"]
    for class_name, stats in detection_counts.items():
        lines += [
            "",
            f"{class_name}:",
            f"  Total: {stats['count']}",
            f"  High confidence (>70%): {stats['high_conf']} 🟢",
            f"  Medium confidence (40-70%): {stats['med_conf']} 🟡",
            f"  Low confidence (<40%): {stats['low_conf']} 🔴",
        ]
    count_text = "\n".join(lines) + "\n"

    return image_np, count_text, "Image processed successfully", bounding_boxes
# Build the Gradio UI: upload plus tuning controls on the left,
# annotated image and per-class counts on the right.
with gr.Blocks(theme=gr.themes.Soft()) as iface:
    gr.Markdown("# Image Object Detection with Confidence Indicators")

    with gr.Row():
        # Input side: image upload and inference parameters.
        with gr.Column(scale=1):
            image_input = gr.Image(
                label="Upload Image",
                type="numpy",
                height=300,
            )
            with gr.Group():
                imgsz_dropdown = gr.Dropdown(
                    choices=["320", "640"],
                    value="640",
                    label="Image Size (px)",
                )
                conf_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.25,
                    step=0.05,
                    label="Confidence Threshold",
                )
                iou_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.45,
                    step=0.05,
                    label="IoU Threshold",
                )

        # Output side: annotated image and textual detection summary.
        with gr.Column(scale=2):
            processed_output = gr.Image(
                label="Processed Image",
                height=400,
            )
            text_output = gr.Textbox(
                label="Detection Results",
                lines=10,
            )

    process_btn = gr.Button("Process Image")
    status_text = gr.Textbox(label="Status", lines=1)

    # Wire the button to the UI processing function.
    process_btn.click(
        fn=process_uploaded_image,
        inputs=[image_input, imgsz_dropdown, conf_slider, iou_slider],
        outputs=[processed_output, text_output, status_text],
    )

# API endpoint with bounding-box JSON output, exposed as /api/detect.
# NOTE(review): `load` fires on every page visit (with a None image, which
# process_api_image tolerates); confirm this is the intended trigger.
iface.load(
    fn=process_api_image,
    inputs=[
        gr.Image(type="numpy", label="Image"),
        gr.Number(value=640, label="Image Size"),
        gr.Number(value=0.25, label="Confidence Threshold"),
        gr.Number(value=0.45, label="IoU Threshold"),
    ],
    outputs=[
        gr.Image(type="numpy", label="Processed Image"),
        gr.Textbox(label="Detection Results"),
        gr.Textbox(label="Status"),
        gr.JSON(label="Bounding Boxes"),
    ],
    api_name="detect",
)
# Serve the app only when executed as a script (not on import).
if __name__ == "__main__":
    # queue() returns the Blocks instance, so launch() can be chained.
    iface.queue().launch()