from omnidet.models.detection_decoder import YoloDecoder, YOLOLayer
import torch
from torchvision import transforms
from PIL import Image
import os
import cv2
import numpy as np
from omnidet.models.resnet import ResnetEncoder
import argparse
import json
from pathlib import Path
import torchvision.transforms as T
import yaml
from omnidet.utils import Tupperware
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
# from ..data_loader.woodscape_loader import WoodScapeRawDataset
# from torch.utils.data import DataLoader
from omnidet.train_utils.detection_utils import non_max_suppression


def inputs_to_device(self, inputs):
    """Move every tensor in *inputs* to ``self.device``, mutating the dict in place."""
    for name in inputs:
        inputs[name] = inputs[name].to(self.device)

def printj(dic):
    """Pretty-print *dic* as 4-space-indented JSON; returns None (print's result)."""
    serialized = json.dumps(dic, indent=4)
    return print(serialized)

def collect_args() -> argparse.Namespace:
    """Parse command line arguments.

    Returns a namespace with a single ``config`` attribute, defaulting to
    ``../data/params.yaml`` relative to this file.
    """
    default_config = Path(__file__).parent / "../data/params.yaml"
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Config file", type=str, default=default_config)
    return parser.parse_args()

def collect_tupperware() -> Tupperware:
    """Load the YAML config named by ``--config`` and wrap it in a Tupperware.

    Also pretty-prints the parsed config via :func:`printj` for visibility.

    :return: Tupperware wrapping the parsed YAML mapping.
    """
    config = collect_args()
    # Use a context manager so the config file handle is closed
    # deterministically (the original bare open() leaked it).
    with open(config.config) as config_file:
        params = yaml.safe_load(config_file)
    args = Tupperware(params)
    printj(args)
    return args

def inference():
    """Run OmniDet object detection over a video and save an annotated copy.

    Pipeline: load encoder/decoder checkpoints, crop and resize every frame
    of the input video, run the YOLO decoder with non-max suppression, draw
    boxes for classes 0 and 1 on the cropped frame, and write the annotated
    frames out as an mp4.  All input/output paths below are hard-coded for a
    specific machine.
    """
    args = collect_tupperware()
    input_video = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/capture_output/right(1)/original_video.avi'  # input video file path
    output_folder = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/capture_output/video_detect'  # output folder
    output_video_path = os.path.join(output_folder, "output_video.mp4")  # output video path
    args.model_checkpoint_encoder = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/encoder.pth'
    args.model_checkpoint_detection = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/detection.pth'
    args.dataset_dir = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/cropped_00000_FV.png'
    args.originimage = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/test_png/00037_FV.png'
    args.carpicture = '/home/li/深度学习/BSDpicture&video/BSD图片和视频/WIN_20250123_01_17_35_Pro (2).jpg'

    # Initialise device and models (CPU-only inference).
    device = torch.device("cpu")
    # --- Init Detection model ---
    encoder = ResnetEncoder(num_layers=18, pretrained=False).to(device)  # 18-layer ResNet backbone
    decoder = YoloDecoder(encoder.num_ch_enc, args).to(device)

    checkpoint_encoder = torch.load(args.model_checkpoint_encoder, map_location=device, weights_only=True)
    checkpoint_detection = torch.load(args.model_checkpoint_detection, map_location=device, weights_only=True)

    # Adaptive key loading: keep only the checkpoint entries whose keys
    # exist in the current encoder, then merge them into its state dict.
    model_dict_encoder = encoder.state_dict()
    pretrained_dict_encoder = {k: v for k, v in checkpoint_encoder.items() if k in model_dict_encoder}
    model_dict_encoder.update(pretrained_dict_encoder)
    encoder.load_state_dict(model_dict_encoder)
    decoder.load_state_dict(checkpoint_detection)

    # Switch both models to evaluation mode.
    encoder.eval()
    decoder.eval()

    # Per-car / per-camera crop rectangles as (left, top, right, bottom).
    cropped_coords = dict(Car1=dict(FV=(114, 110, 1176, 610),
                                         MVL=(343, 5, 1088, 411),
                                         MVR=(185, 5, 915, 425),
                                         RV=(186, 203, 1105, 630)),
                            Car2=dict(FV=(160, 272, 1030, 677),
                                         MVL=(327, 7, 1096, 410),
                                         MVR=(175, 4, 935, 404),
                                         RV=(285, 187, 1000, 572)),
                            Car3=dict(FV=(300, 20, 1620, 800),
                                        TEST=(100, 0, 640, 400)))
    cropped_coords = cropped_coords["Car3"]["TEST"]
    offset = cropped_coords[:4]
    # Size of the cropped region.
    crop_width = offset[2] - offset[0]
    crop_height = offset[3] - offset[1]

    # Ratios mapping boxes from network input space back to crop space.
    width_ratio = crop_width / args.input_width
    height_ratio = crop_height / args.input_height

    # Open the input video.
    cap = cv2.VideoCapture(input_video)
    if not cap.isOpened():
        print("无法打开视频文件")
        return

    fps = cap.get(cv2.CAP_PROP_FPS) / 2  # halve the playback frame rate
    frame_count = 0  # number of frames processed so far

    # Set up the output writer.  NOTE(fix): the writer's frame size must
    # match the frames actually written (the annotated CROPPED image), or
    # OpenCV silently drops every frame; the original used the full
    # source-frame size here.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (crop_width, crop_height))

    # Hoist loop-invariant transforms out of the per-frame loop.
    to_tensor = T.ToTensor()
    resize = T.Resize((args.input_height, args.input_width),
                      interpolation=T.InterpolationMode.BICUBIC)
    feed_width = args.input_width
    feed_height = args.input_height

    while True:
        # Read one frame from the video.
        ret, frame = cap.read()
        if not ret:
            break  # no more frames

        # BGR (OpenCV) -> RGB -> PIL, then crop to the region of interest.
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        original_image = image
        image = Image.fromarray(image)
        image = image.crop(cropped_coords)
        cuted_image = image
        resized_image = resize(image)
        image_tensor = to_tensor(resized_image)
        # input_image = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image_tensor)
        input_image = image_tensor.unsqueeze(0)

        # Run detection.  The encoder forward is inside no_grad too — the
        # original ran it outside and built an unused autograd graph.
        with torch.no_grad():
            features = encoder(input_image)
            yolo_outputs = decoder(features, img_dim=[feed_width, feed_height])["yolo_outputs"]
            # Non-max suppression.
            yolo_outputs = non_max_suppression(yolo_outputs,
                                               conf_thres=args.detection_conf_thres,
                                               nms_thres=args.detection_nms_thres)

            image = np.array(cuted_image.copy())
            if yolo_outputs is not None and len(yolo_outputs) > 0 and yolo_outputs[0] is not None:
                for ind in range(len(yolo_outputs[0])):
                    out_boxes = yolo_outputs[0][ind][:4]
                    confidence = yolo_outputs[0][ind][4]  # confidence score
                    # Class identifier taken from index 6 when present.
                    class_name = yolo_outputs[0][ind][6] if len(
                        yolo_outputs[0][ind]) > 6 else "Unknown"
                    # Map box coordinates back to the cropped frame.
                    min_x, min_y, max_x, max_y = out_boxes.numpy()
                    min_x *= width_ratio
                    min_y *= height_ratio
                    max_x *= width_ratio
                    max_y *= height_ratio

                    # Only draw boxes for classes 0 and 1.
                    if class_name in [0, 1]:
                        cv2.rectangle(image, (int(min_x), int(min_y)), (int(max_x), int(max_y)), (255, 0, 0), 1)
                        print(f"{class_name}: {confidence:.2f}")
                        # Render the class and confidence above the box.
                        label = f"{class_name}: {confidence:.2f}"
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(image, label, (int(min_x), int(min_y) - 10), font, 0.6, (255, 0, 0), 1, cv2.LINE_AA)

            # Append the annotated frame to the output video.
            result_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            out.write(result_image)
            frame_count += 1

        # Stop once every frame has been processed.
        if frame_count >= cap.get(cv2.CAP_PROP_FRAME_COUNT):
            break

    # Release resources.  NOTE(fix): the original never released the
    # VideoWriter, which can leave the mp4 container truncated/unplayable.
    cap.release()
    out.release()
    cv2.destroyAllWindows()

# Script entry point: run the video-detection inference pipeline.
if __name__ == '__main__':
    inference()
