| | import cv2 |
| | import gradio as gr |
| | import numpy as np |
| | import tempfile |
| | import os |
| | from ultralytics import YOLO |
| | from deep_sort_realtime.deepsort_tracker import DeepSort |
| | from collections import defaultdict |
| |
|
| | |
# Maps class label -> set of unique DeepSORT track IDs observed for that
# class during the current video run (reset at the start of each run).
class_counts = defaultdict(set)

# Custom-trained YOLOv8 weights; presumably a luggage/object surveillance
# model given the UI text — TODO confirm the training classes.
model = YOLO("best.pt")

# DeepSORT multi-object tracker; max_age=30 frames before a lost track
# is dropped.
tracker = DeepSort(max_age=30)
| |
|
| | |
def detect_on_image(image):
    """Run YOLO detection on a single image and return an annotated copy.

    Args:
        image: Input frame as a numpy array (as supplied by gr.Image).

    Returns:
        A copy of the image with boxes and labels drawn for detections
        above the 0.4 confidence threshold, or None if no image was given.
    """
    # Gradio submits None when the user clears the input widget.
    if image is None:
        return None

    # Draw on a copy so the caller's (Gradio's) buffer is never mutated.
    annotated = image.copy()
    results = model(annotated)[0]
    for box in results.boxes:
        conf = float(box.conf[0])
        if conf <= 0.4:  # skip low-confidence detections early
            continue
        cls_id = int(box.cls[0])
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        label = f"{model.names[cls_id]} {conf:.2f}"
        cv2.rectangle(annotated, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(annotated, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
    return annotated
| |
|
| | |
def detect_and_track_video(video_path):
    """Run YOLO detection + DeepSORT tracking over a video file.

    Writes an annotated copy of the video to a temporary .mp4 and records,
    per class label, the set of unique track IDs seen (in the module-level
    ``class_counts``, which is cleared at the start of each run).

    Args:
        video_path: Filesystem path to the input video.

    Returns:
        Path to the annotated output video, or None if the input path is
        missing or the video cannot be opened.
    """
    if not video_path or not os.path.exists(video_path):
        return None

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():  # unreadable/corrupt file: fail gracefully
        cap.release()
        return None

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 fps, which would yield a broken writer.
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0

    # Close the temp-file handle immediately: only its *name* is needed,
    # and leaving it open leaks the descriptor (and blocks VideoWriter
    # from reopening the path on Windows).
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        output_path = tmp.name

    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'),
                          fps, (width, height))
    class_counts.clear()  # reset per-class unique-ID tallies for this run

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            results = model(frame)[0]

            # DeepSORT expects ([x, y, w, h], confidence, label) tuples.
            detections = []
            for box in results.boxes:
                conf = float(box.conf[0])
                if conf <= 0.4:  # same threshold as the image path
                    continue
                cls_id = int(box.cls[0])
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                detections.append(
                    ([x1, y1, x2 - x1, y2 - y1], conf, model.names[cls_id])
                )

            tracks = tracker.update_tracks(detections, frame=frame)

            for track in tracks:
                # Only draw tracks that have been confirmed over enough frames.
                if not track.is_confirmed():
                    continue
                track_id = track.track_id
                l, t, r, b = map(int, track.to_ltrb())
                label = track.get_det_class()
                cv2.rectangle(frame, (l, t), (r, b), (0, 255, 0), 2)
                cv2.putText(frame, f'{label} ID {track_id}', (l, t - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
                class_counts[label].add(track_id)

            out.write(frame)
    finally:
        # Always release the capture and writer, even if inference raises,
        # so the output container is finalized and handles are not leaked.
        cap.release()
        out.release()

    return output_path
| |
|
| | |
# Gradio tab 1: single-image detection (no tracking); UI text is French.
image_interface = gr.Interface(
    fn=detect_on_image,
    inputs=gr.Image(type="numpy", label="Image de surveillance"),
    outputs=gr.Image(type="numpy", label="Image annotée"),
    title="📸 Détection sur Image",
    description="Détection de bagages et objets avec YOLOv8."
)
| |
|
# Gradio tab 2: whole-video detection + DeepSORT multi-object tracking.
video_interface = gr.Interface(
    fn=detect_and_track_video,
    inputs=gr.Video(label="Vidéo de surveillance"),
    outputs=gr.Video(label="Vidéo annotée avec suivi"),
    title="🎥 Suivi sur Vidéo",
    description="Suivi multi-objets avec DeepSORT + YOLOv8."
)
| |
|
| | |
# Combine both interfaces into one tabbed app.
demo = gr.TabbedInterface(
    [image_interface, video_interface],
    tab_names=["📷 Image", "🎥 Vidéo"]
)

# Guard the launch so importing this module does not start a web server
# as a side effect; running the file as a script behaves as before.
if __name__ == "__main__":
    demo.launch()