|
import os |
|
import cv2 |
|
import numpy as np |
|
import torch |
|
from ultralytics import YOLO |
|
from sort import Sort |
|
|
|
|
|
# Path to the YOLO weights file loaded below.
MODEL_PATH = "yolov12x.pt"

# Detection model, loaded once at import time.
model = YOLO(MODEL_PATH)

# Class index for "truck" in the model's label set (COCO ordering).
TRUCK_CLASS_ID = 7

# Module-level SORT tracker.
# NOTE(review): this instance is shared by every call to
# count_unique_trucks(), so track state carries over between videos —
# confirm that is intended.
tracker = Sort()

# Minimum detection confidence for a truck box to be kept.
CONFIDENCE_THRESHOLD = 0.5

# Pixel distance used by the movement check in count_unique_trucks().
DISTANCE_THRESHOLD = 50

# Filename keyword -> frame-sampling interval in seconds.
# Matching is substring-based and scans in insertion order, so earlier
# keys win (e.g. "drone_slow" is tested before "one", and "one" is itself
# a substring of "drone").
TIME_INTERVALS = {
    "fixed": 3,
    "moving": 5,
    "drone_slow": 4,
    "drone_fast": 8,
    "dashcam": 6,
    "highway": 2,
    "city": 9,
    "one": 1,
    "two": 2,
    "three": 3,
    "four": 4,
    "five": 5,
    "six": 6,
    "seven": 7,
    "eight": 8,
    "nine": 9,
    "ten": 10
}
|
|
|
def old_determine_time_interval(video_filename):
    """
    Determine the frame-sampling interval (in seconds) from keywords in the
    video filename.

    Superseded by determine_time_interval(), which adds debug printing but
    is otherwise identical.

    Args:
        video_filename: Lower-cased basename of the video file.

    Returns:
        The interval mapped to the first TIME_INTERVALS keyword found as a
        substring of the filename, or 5 if no keyword matches.
        (The original docstring claimed a default of 7 seconds, but the
        code has always returned 5 — the docstring was wrong.)
    """
    # Insertion order matters: earlier keys win (e.g. "drone_slow" is
    # checked before "one", which is a substring of "drone").
    for keyword, interval in TIME_INTERVALS.items():
        if keyword in video_filename:
            return interval
    return 5
|
|
|
def determine_time_interval(video_filename):
    """Resolve the frame-sampling interval (seconds) for a video filename.

    Scans TIME_INTERVALS in insertion order and returns the value of the
    first keyword that occurs as a substring of the filename; falls back to
    5 when nothing matches. Progress is echoed to stdout for debugging.
    """
    print(f"Checking filename: {video_filename}")
    match = next(
        (
            (keyword, interval)
            for keyword, interval in TIME_INTERVALS.items()
            if keyword in video_filename
        ),
        None,
    )
    if match is None:
        print("No keyword match, using default interval: 5")
        return 5
    keyword, interval = match
    print(f"Matched keyword: {keyword} -> Interval: {interval}")
    return interval
|
|
|
|
|
def count_unique_trucks(video_path):
    """
    Count unique trucks appearing in a video.

    Samples frames at an interval inferred from the filename, detects
    trucks with the YOLO model, and tracks them with SORT so each physical
    truck (track ID) is counted once.

    Args:
        video_path: Path to the video file to analyze.

    Returns:
        dict: {"Total Unique Trucks": <count>} on success, or a plain
        error string if the video cannot be opened (kept for backward
        compatibility with existing callers).
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    # Fresh tracker per call so track IDs do not leak between videos
    # (the module-level `tracker` is shared and would carry state over).
    local_tracker = Sort()

    unique_truck_ids = set()
    truck_history = {}

    # cv2 reports 0 FPS for some containers/streams; fall back to a sane
    # default so frame_skip can never be zero (which would make the
    # modulo below raise ZeroDivisionError).
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    video_filename = os.path.basename(video_path).lower()
    time_interval = determine_time_interval(video_filename)
    frame_skip = max(1, fps * time_interval)

    frame_count = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            frame_count += 1
            # Process only one frame per `time_interval` seconds of video.
            if frame_count % frame_skip != 0:
                continue

            results = model(frame, verbose=False)

            detections = []
            for result in results:
                for box in result.boxes:
                    class_id = int(box.cls.item())
                    confidence = float(box.conf.item())
                    if class_id == TRUCK_CLASS_ID and confidence > CONFIDENCE_THRESHOLD:
                        x1, y1, x2, y2 = map(int, box.xyxy[0])
                        detections.append([x1, y1, x2, y2, confidence])

            if len(detections) > 0:
                tracked_objects = local_tracker.update(np.array(detections))

                for obj in tracked_objects:
                    truck_id = int(obj[4])
                    x1, y1, x2, y2 = obj[:4]
                    truck_center = (x1 + x2) / 2, (y1 + y2) / 2

                    # Every distinct track ID counts once; set semantics
                    # make repeated additions harmless. (The original also
                    # re-added already-known IDs when they moved more than
                    # DISTANCE_THRESHOLD pixels, which was a no-op.)
                    unique_truck_ids.add(truck_id)

                    # Keep the last-seen position current — the original
                    # stored only the first sighting, so any movement
                    # check compared against a stale point.
                    truck_history[truck_id] = {
                        "frame_count": frame_count,
                        "position": truck_center
                    }
    finally:
        # Release the capture even if detection/tracking raises.
        cap.release()

    return {"Total Unique Trucks": len(unique_truck_ids)}
|
|
|
|
|
def analyze_video(video_file):
    """
    Run the truck counter on a video and format the result for display.

    Args:
        video_file: Path to the video (as supplied by the Gradio widget).

    Returns:
        A newline-separated "key: value" summary, or the error message when
        the video could not be processed.
    """
    result = count_unique_trucks(video_file)
    # count_unique_trucks returns a plain error string when the video
    # cannot be opened; the original code crashed with AttributeError on
    # .items() in that case.
    if isinstance(result, str):
        return result
    return "\n".join(f"{key}: {value}" for key, value in result.items())
|
|
|
|
|
# NOTE(review): gradio is imported mid-file rather than in the top import
# block — consider moving it up with the other imports.
import gradio as gr

# Web UI: upload a video, receive the unique-truck count as text.
iface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Analysis Result"),
    title="YOLOv12x Unique Truck Counter",
    description="Upload a video to count unique trucks using YOLOv12x and SORT tracking."
)

# Start the Gradio server only when run as a script, not on import.
if __name__ == "__main__":
    iface.launch()
|
|
|
|