# Import required libraries
import numpy as np
import cv2
import torch
import os
import time
import pathlib
from models.FastRCNN import FastRCNN_model


def evaluate_video(config):
    """Run object detection over a video, display and save the annotated result.

    Reads frames from ``config['SOURCE_VIDEO_PATH']``, runs a Fast R-CNN model
    loaded from ``config['BEST_MODEL_PATH_TO_PRED']`` on each resized frame,
    draws bounding boxes / class names / FPS, writes the annotated video into
    ``config['PRED_VIDEO_PATH']`` and prints the average inference FPS.

    Args:
        config: dict-like with keys SOURCE_VIDEO_PATH, CLASSES,
            PRED_VIDEO_PATH, BEST_MODEL_PATH_TO_PRED, N_CLASSES, DEVICE.
    """
    video_path = config['SOURCE_VIDEO_PATH']
    classes = config['CLASSES']
    pred_video_path = config['PRED_VIDEO_PATH']
    best_model_path = config['BEST_MODEL_PATH_TO_PRED']  # replace with your own trained model path if desired
    num_classes = config['N_CLASSES']
    device = config['DEVICE']

    # Create the output directory if it does not exist yet.
    if not os.path.exists(pred_video_path):
        os.makedirs(pred_video_path)
        print(f"Directory '{pred_video_path}' was created.")
    else:
        print(f"Directory '{pred_video_path}' already exists.")

    # One random color per class for drawing.
    COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

    # Load the model and restore the best checkpoint weights.
    model = FastRCNN_model(num_classes=num_classes)
    best_model = torch.load(best_model_path, map_location=device)
    model.load_state_dict(best_model['model_state_dict'])
    model.to(device).eval()

    # Detections scoring below this threshold are ignored.
    detection_threshold = 0.8
    # Target (width, height) every frame is resized to.
    RESIZE_TO = (512, 512)

    # Open the input video.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print('Error while trying to read video. Please check path again')

    # VideoWriter for the annotated output video (output assumed 30 FPS).
    out = cv2.VideoWriter(f"{pred_video_path}/video_pared.mp4",
                          cv2.VideoWriter_fourcc(*'mp4v'), 30,
                          RESIZE_TO)

    frame_count = 0  # total number of processed frames
    total_fps = 0  # accumulated per-frame FPS, used for the average

    # Loop until the video ends or the user quits.
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.resize(frame, RESIZE_TO)
        # Preprocess a copy: BGR (OpenCV) -> RGB, scale to [0, 1], HWC -> CHW.
        image = frame.copy()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        # BUGFIX: place the tensor on the configured device instead of the
        # previous hard-coded .cuda(), which crashed on CPU-only machines.
        image = torch.tensor(image, dtype=torch.float, device=device)
        image = torch.unsqueeze(image, 0)  # add batch dimension

        start_time = time.time()
        with torch.no_grad():  # no gradients needed for inference
            outputs = model(image)
        end_time = time.time()

        fps = 1 / (end_time - start_time)  # FPS for this single frame
        total_fps += fps
        frame_count += 1

        # Move all detection tensors back to the CPU for numpy/drawing.
        outputs = [{k: v.to('cpu') for k, v in t.items()} for t in outputs]
        if len(outputs[0]['boxes']) != 0:
            boxes = outputs[0]['boxes'].data.numpy()
            scores = outputs[0]['scores'].data.numpy()
            labels = outputs[0]['labels'].data.numpy()
            # BUGFIX: filter labels with the same score mask as boxes so class
            # names stay aligned with their boxes even if the model does not
            # return detections sorted by score.
            keep = scores >= detection_threshold
            draw_boxes = boxes[keep].astype(np.int32)
            pred_classes = [classes[i] for i in labels[keep]]

            # Draw each surviving box with its class name.
            for box, class_name in zip(draw_boxes, pred_classes):
                color = COLORS[classes.index(class_name)]
                cv2.rectangle(frame,
                              (int(box[0]), int(box[1])),
                              (int(box[2]), int(box[3])),
                              color, 2)
                cv2.putText(frame, class_name,
                            (int(box[0]), int(box[1] - 5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, color,
                            2, lineType=cv2.LINE_AA)

        # BUGFIX: draw the FPS overlay once per frame (previously it was
        # redrawn for every box and omitted on frames with no detections).
        cv2.putText(frame, f"{fps:.0f} FPS",
                    (15, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                    2, lineType=cv2.LINE_AA)

        cv2.imshow('image', frame)  # show the annotated frame
        out.write(frame)  # append it to the output video
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit early
            break

    # Release capture and writer (BUGFIX: out.release() was missing, which
    # could leave the output file unflushed), then close all windows.
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    # Print the average FPS; guard against zero frames (BUGFIX: avoids
    # ZeroDivisionError when the video could not be read at all).
    avg_fps = total_fps / frame_count if frame_count else 0.0
    print(f"Average FPS: {avg_fps:.3f}")

from config import get_config_from_xml
from data_proc.dataset_prepare import prepare_dataset


def _main():
    # Entry point: load the XML config, prepare the dataset, then run
    # video evaluation with the trained model.
    configuration = get_config_from_xml(
        "configurations/config_fastrcnn_bce_epoch_10.xml")
    prepare_dataset(configuration)
    evaluate_video(configuration)


if __name__ == '__main__':
    _main()
