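"""Gradio demo: DETR object detection plus SORT tracking to count people,
bicycles, cars, and motorcycles in an uploaded video.

Expects a local `sort` module providing the `Sort` tracker (e.g. the
single-file reference SORT implementation, sort.py).
"""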
import cv2
import gradio as gr
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForObjectDetection

from sort import Sort

print("Loading model and processor...")
model_checkpoint = "facebook/detr-resnet-50"
image_processor = AutoImageProcessor.from_pretrained(model_checkpoint)
model = AutoModelForObjectDetection.from_pretrained(model_checkpoint)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
print("Model loaded successfully.")


def iou(boxA, boxB):
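    """Compute Intersection over Union of two boxes in [x1, y1, x2, y2] form."""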
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    return interArea / float(boxAArea + boxBArea - interArea)


def process_video(input_video_path):
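    """Detect, track, and count objects in a video; return the annotated file path."""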
    tracker = Sort(min_hits=3, iou_threshold=0.3)
    total_counts = {'person': 0, 'bicycle': 0, 'car': 0, 'motorcycle': 0}
    counted_ids = set()

    output_video_path = "output.mp4"

    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        raise gr.Error(f"Could not open video file: {input_video_path}")

    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back to 30 if metadata is missing

    new_width = 960
    new_height = 540

    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (new_width, new_height))

    frame_number = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame_number += 1
        frame = cv2.resize(frame, (new_width, new_height))
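        # Run DETR detection on the RGB version of the frame (OpenCV loads BGR).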
        pil_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        inputs = image_processor(images=pil_image, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        target_sizes = torch.tensor([pil_image.size[::-1]])
        results = image_processor.post_process_object_detection(outputs, threshold=0.6, target_sizes=target_sizes)[0]
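
        # Keep only the classes we count; SORT expects rows of [x1, y1, x2, y2, score].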
        detections_for_sort = []
        original_detections = []
        for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
            label_name = model.config.id2label[label.item()]
            if label_name in total_counts:
                box_list = box.tolist()
                detections_for_sort.append([*box_list, score.item()])
                original_detections.append({'box': box_list, 'label': label_name})

        if detections_for_sort:
            tracked_objects_raw = tracker.update(np.array(detections_for_sort))
        else:
            # Update SORT every frame, even with no detections, so stale tracks age out.
            tracked_objects_raw = tracker.update(np.empty((0, 5)))

        for obj in tracked_objects_raw:
            x1, y1, x2, y2, obj_id = [int(val) for val in obj]
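            # SORT returns only box geometry, so recover the class label by
            # matching the track to the highest-IoU detection in this frame.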
            best_iou = 0
            best_label = None
            for det in original_detections:
                iou_score = iou([x1, y1, x2, y2], det['box'])
                if iou_score > best_iou:
                    best_iou = iou_score
                    best_label = det['label']

            if best_label:
                # Count each track ID exactly once, the first frame it appears.
                if obj_id not in counted_ids:
                    total_counts[best_label] += 1
                    counted_ids.add(obj_id)

                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(frame, f'{best_label} ID: {obj_id}', (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
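
        # Overlay running totals: black outline under white text for contrast.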
        y_offset = 30
        for obj_name, count in total_counts.items():
            text = f'Total {obj_name.capitalize()}: {count}'
            cv2.putText(frame, text, (15, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 5)
            cv2.putText(frame, text, (15, y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
            y_offset += 30

        out.write(frame)

    cap.release()
    out.release()

    print(f"Video processing finished. Total frames: {frame_number}")
    return output_video_path


with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown("<h1>Real-Time Object Tracking & Counting with DETR and SORT</h1>")
    gr.Markdown("Upload a video to see object detection and tracking in action. This demo uses Facebook's DETR model for detection and the SORT algorithm to assign unique IDs and count objects.")

    with gr.Row():
        input_video = gr.Video(label="Input Video", width=640, height=360)
        output_video = gr.Video(label="Processed Video", width=640, height=360)

    submit_button = gr.Button("Submit", variant="primary")

    gr.Examples(
        examples=[['5402016-hd_1920_1080_30fps.mp4']],
        inputs=input_video,
        label="Click an example to run"
    )

    submit_button.click(
        fn=process_video,
        inputs=input_video,
        outputs=output_video
    )

demo.launch()