import torch
import cv2
import numpy as np
import os
import onnxruntime as ort
import pandas as pd

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # Force CPU-only execution
# Point torch.hub's cache directory at a local path (if needed)
os.environ['TORCH_HOME'] = 'D:/machine learning/yolov5-master'

# Load the exported YOLOv5 ONNX model
ort_session = ort.InferenceSession('runs/train/exp12/weights/best.onnx')

# Class names, indexed by the integer class id the model outputs.
# NOTE: 'waring_sign' / 'trafic_cone' spellings must match the training labels
# and the string comparisons in process_frame — do not "fix" them in isolation.
classes = ['people', 'bike', 'tree', 'blind_way', 'hard_shoulder', 'waring_sign', 'garbage_can', 'dog', 'road_block',
           'trafic_cone', 'car', 'pole', 'cat']


def check_overlap(box1, box2):
    """Return True if two axis-aligned boxes (x1, y1, x2, y2) intersect.

    Boxes that merely touch at an edge or a corner count as overlapping.
    """
    ax1, ay1, ax2, ay2 = box1
    bx1, by1, bx2, by2 = box2
    # Two boxes intersect iff each starts before the other ends, on both
    # axes (the positive form of the separating-axis test).
    return ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2


def process_frame(frame):
    """
    Run ONNX detection on a single frame, draw boxes/labels on ``frame``
    in place, and return a hazard signal value.

    Args:
        frame: BGR image (numpy array) as read by OpenCV.

    Returns:
        int: 2 if a bike/car overlaps a blind way, 1 if a static obstacle
        (tree, pole, cone, ...) does, 0 otherwise.
    """
    # Convert to RGB and remember the original size so detections can be
    # mapped back from the model's 640x640 input space.
    input_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    original_height, original_width = frame.shape[:2]
    input_frame = cv2.resize(input_frame, (640, 640))
    # HWC -> CHW, scale to [0, 1], add batch dimension.
    input_frame = np.transpose(input_frame, (2, 0, 1)).astype(np.float32)
    input_frame = input_frame / 255.0
    input_frame = np.expand_dims(input_frame, axis=0)

    # The exported model expects float16 input. Cast directly with numpy;
    # the previous torch.from_numpy(...).half().numpy() round-trip produced
    # the same array but needlessly pulled torch into the per-frame path.
    input_frame = input_frame.astype(np.float16)
    print("After type conversion, input frame data type:", input_frame.dtype)

    # Debug output of what is fed to the model.
    print("Input frame data type:", input_frame.dtype)
    print("Input frame shape:", input_frame.shape)

    # Run the ONNX model.
    input_name = ort_session.get_inputs()[0].name
    outputs = ort_session.run(None, {input_name: input_frame})

    print("Model output:", outputs)

    # NOTE(review): the code below assumes the first output already holds
    # post-NMS rows laid out as [xmin, ymin, xmax, ymax, confidence, class,
    # 13 extra columns] in 640x640 coordinates — verify against the export
    # settings of best.onnx (a raw YOLOv5 head uses xywh + per-class scores).
    detections = outputs[0]
    # Drop the batch dimension when batch size is 1.
    if detections.shape[0] == 1:
        detections = detections[0]

    detections = pd.DataFrame(detections,
                              columns=['xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class'] + [f'extra_{i}' for i in
                                                                                                 range(13)])
    # NOTE(review): assumes 'class' holds an integer index into `classes`.
    detections['name'] = [classes[int(cls)] for cls in detections['class']]

    # Drop low-confidence detections.
    confidence_threshold = 0.5
    detections = detections[detections['confidence'] >= confidence_threshold]

    # Drop rows whose coordinates contain inf or NaN.
    detections = detections[~np.isinf(detections[['xmin', 'ymin', 'xmax', 'ymax']]).any(axis=1)]
    detections = detections[~np.isnan(detections[['xmin', 'ymin', 'xmax', 'ymax']]).any(axis=1)]

    # Scale box coordinates back to the original image size.
    detections['xmin'] = detections['xmin'] * original_width / 640
    detections['ymin'] = detections['ymin'] * original_height / 640
    detections['xmax'] = detections['xmax'] * original_width / 640
    detections['ymax'] = detections['ymax'] * original_height / 640

    # Re-filter inf/NaN after rescaling.
    detections = detections[~np.isinf(detections[['xmin', 'ymin', 'xmax', 'ymax']]).any(axis=1)]
    detections = detections[~np.isnan(detections[['xmin', 'ymin', 'xmax', 'ymax']]).any(axis=1)]

    # Boxes detected as blind way (tactile paving).
    blind_way_boxes = detections[detections['name'] == 'blind_way'][['xmin', 'ymin', 'xmax', 'ymax']].values

    signal_value = 0
    if len(blind_way_boxes) > 0:
        for blind_way_box in blind_way_boxes:
            for _, obj in detections.iterrows():
                obj_box = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']]
                if check_overlap(blind_way_box, obj_box):
                    # Vehicles on the blind way are the highest hazard.
                    if obj['name'] in ['bike', 'car']:
                        signal_value = 2
                        break
                    # Static obstacles are a medium hazard.
                    elif obj['name'] in ['tree', 'hard_shoulder', 'waring_sign', 'garbage_can', 'road_block',
                                         'trafic_cone', 'pole']:
                        signal_value = 1
                        break
                    # Moving living beings do not raise the signal, but stop
                    # scanning this blind-way box (original behavior kept).
                    elif obj['name'] in ['people', 'cat', 'dog']:
                        signal_value = 0
                        break
            if signal_value > 0:
                break
    else:
        signal_value = 0

    def draw_bounding_boxes(frame, detections):
        # Draw a red rectangle for each valid detection, skipping boxes
        # with non-finite or out-of-range coordinates.
        for _, detection in detections.iterrows():
            xmin, ymin, xmax, ymax = detection['xmin'], detection['ymin'], detection['xmax'], detection['ymax']
            if np.isinf(xmin) or np.isinf(ymin) or np.isinf(xmax) or np.isinf(ymax):
                print(
                    f"Found infinite value in bounding box coordinates: xmin={xmin}, ymin={ymin}, xmax={xmax}, ymax={ymax}")
                continue
            if np.isnan(xmin) or np.isnan(ymin) or np.isnan(xmax) or np.isnan(ymax):
                print(
                    f"Found NaN value in bounding box coordinates: xmin={xmin}, ymin={ymin}, xmax={xmax}, ymax={ymax}")
                continue
            xmin, ymin, xmax, ymax = int(xmin), int(ymin), int(xmax), int(ymax)
            # BUGFIX: allow xmax == width / ymax == height — a box touching
            # the right/bottom edge is valid (the old strict '<' rejected it).
            if 0 <= xmin < original_width and 0 <= ymin < original_height and 0 <= xmax <= original_width and 0 <= ymax <= original_height:
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)
            else:
                print(f"Invalid bounding box coordinates: xmin={xmin}, ymin={ymin}, xmax={xmax}, ymax={ymax}")
        return frame

    def draw_labels(frame, detections):
        # Put the class name just above each box's top-left corner.
        for _, detection in detections.iterrows():
            xmin, ymin, xmax, ymax = int(detection['xmin']), int(detection['ymin']), int(detection['xmax']), int(
                detection['ymax'])
            label = detection['name']
            cv2.putText(frame, label, (xmin, ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        return frame

    frame = draw_bounding_boxes(frame, detections)
    frame = draw_labels(frame, detections)

    return signal_value


# 示例：读取视频文件并处理每一帧
# Example: read a video file and process it frame by frame.
cap = cv2.VideoCapture('忙碌的盲道-视障出行第一视角.mp4')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    signal = process_frame(frame)
    print(f"当前帧的信号值: {signal}")

    # BUGFIX: `frame` is already BGR — cv2.VideoCapture decodes to BGR and
    # process_frame draws on it in place — so the old RGB2BGR conversion
    # swapped the red/blue channels on screen. Display the frame as-is.
    cv2.imshow('YOLOv5 Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
