from omnidet.models.detection_decoder import YoloDecoder, YOLOLayer
import torch
from torchvision import transforms
from PIL import Image
import os
import cv2
import numpy as np
from omnidet.models.resnet import ResnetEncoder
import argparse
import json
from pathlib import Path
import torchvision.transforms as T
import yaml
from omnidet.utils import Tupperware
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
# from ..data_loader.woodscape_loader import WoodScapeRawDataset
# from torch.utils.data import DataLoader
from omnidet.train_utils.detection_utils import non_max_suppression


def inputs_to_device(self, inputs):
    """Move every tensor in *inputs* to ``self.device``, mutating the dict in place."""
    for name in inputs:
        inputs[name] = inputs[name].to(self.device)

def printj(dic):
    """Pretty-print *dic* as JSON with 4-space indentation."""
    print(json.dumps(dic, indent=4))

def collect_args() -> argparse.Namespace:
    """Parse the command line: a single optional --config path to the YAML params file."""
    default_config = Path(__file__).parent / "../data/params.yaml"
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Config file", type=str, default=default_config)
    return parser.parse_args()

def collect_tupperware() -> Tupperware:
    """Load the YAML file named by --config and wrap it in a Tupperware namespace.

    Returns:
        Tupperware: attribute-style view of the parsed YAML parameters.
    """
    config = collect_args()
    # BUG FIX: the original `yaml.safe_load(open(...))` leaked the file
    # handle; a context manager closes it deterministically.
    with open(config.config) as f:
        params = yaml.safe_load(f)
    args = Tupperware(params)
    printj(args)  # echo the effective configuration for the log
    return args

def detect_yolo_output(yolo_outputs, img_dim, original_dim, conf_thresh=0.5, iou_thresh=0.4):
    """Post-process raw YOLO outputs into per-image boxes, scores and labels.

    Args:
        yolo_outputs: iterable of tensors, one per image, each of shape
            (num_boxes, 5 + num_classes) laid out as [x, y, w, h, conf, class scores...].
        img_dim: (height, width) the network ran at — scale factors use
            index 0 as y and index 1 as x.
        original_dim: (height, width) of the original image to map boxes back to.
        conf_thresh: boxes with objectness OR top class score below this are dropped.
        iou_thresh: unused; kept for interface compatibility with callers.

    Returns:
        (boxes, scores, labels): three lists with one tensor per input image.
        Boxes are [x1, y1, x2, y2] in original-image coordinates; each score is
        objectness * top class score; labels are class indices.
    """
    boxes = []
    scores = []
    labels = []

    scale_x = original_dim[1] / img_dim[1]  # x-direction scale factor
    scale_y = original_dim[0] / img_dim[0]  # y-direction scale factor

    for output in yolo_outputs:
        boxes_batch = []
        scores_batch = []
        labels_batch = []

        for i in range(output.shape[0]):
            box = output[i, :4]           # x_center, y_center, w, h
            conf = output[i, 4]           # objectness confidence
            class_scores = output[i, 5:]  # per-class scores

            # Drop low-objectness boxes early.
            if conf < conf_thresh:
                continue

            # Best class and its score; skip if the class itself is weak.
            score, label = torch.max(class_scores, dim=-1)
            if score < conf_thresh:
                continue

            # Convert center/size to corners and rescale to the original image.
            x_center, y_center, w, h = box
            x1 = (x_center - w / 2) * scale_x
            y1 = (y_center - h / 2) * scale_y
            x2 = (x_center + w / 2) * scale_x
            y2 = (y_center + h / 2) * scale_y

            boxes_batch.append([x1, y1, x2, y2])
            scores_batch.append(score * conf)  # final score = class score * objectness
            labels_batch.append(label)

        # BUG FIX: these appends were outside the per-image loop, so only the
        # LAST image's detections were ever returned; append once per image.
        boxes.append(torch.tensor(boxes_batch))
        scores.append(torch.tensor(scores_batch))
        labels.append(torch.tensor(labels_batch))

    return boxes, scores, labels

def inference():
    """Run live fisheye object detection on a local camera feed.

    Loads a ResNet-18 encoder and YOLO decoder from hard-coded checkpoint
    paths, then loops: grab a frame from /dev/video0, crop and resize it to
    the network input size, run detection + NMS, map the boxes back onto the
    original frame, and display the annotated result until 'q' is pressed.
    """
    args = collect_tupperware()
    input_folder = '/home/li/深度学习/BSDpicture&video/pictures'  # input image folder (not used below)
    output_folder = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/capture_output/output_video.avi'  # output path for the annotated capture video
    original_folder= '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/capture_output/original_video.avi'
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # MJPG codec (original comment said XVID)
    fps = 10  # output video frame rate
    frame_size = (640, 480)
    video_writer = cv2.VideoWriter(output_folder, fourcc, fps, frame_size)
    original_video_writer = cv2.VideoWriter(original_folder, fourcc, fps, frame_size)
    # Hard-coded checkpoint and test-asset paths for this machine.
    args.model_checkpoint_encoder = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/encoder.pth'
    args.model_checkpoint_detection = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/detection.pth'
    args.dataset_dir = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/cropped_00000_FV.png'
    args.originimage = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/test_png/00037_FV.png'
    args.carpicture = '/home/li/深度学习/BSDpicture&video/BSD图片和视频/WIN_20250123_01_17_35_Pro (2).jpg'
    # Initialise device and models (CPU only)
    device = torch.device("cpu")
    # --- Init Detection model ---
    encoder = ResnetEncoder(num_layers=18, pretrained=False).to(device)  # ResNet-18 backbone
    decoder = YoloDecoder(encoder.num_ch_enc, args).to(device)

    checkpoint_encoder = torch.load(args.model_checkpoint_encoder, map_location=device, weights_only=True)
    checkpoint_detection = torch.load(args.model_checkpoint_detection, map_location=device, weights_only=True)

    # Current encoder state dict, used below to filter matching pretrained keys
    model_dict_encoder = encoder.state_dict()

    # Adaptive key loading: keep only pretrained weights whose keys exist in the model
    pretrained_dict_encoder = {k: v for k, v in checkpoint_encoder.items() if k in model_dict_encoder}

    # Merge the matched pretrained weights into the encoder
    model_dict_encoder.update(pretrained_dict_encoder)
    encoder.load_state_dict(model_dict_encoder)
    decoder.load_state_dict(checkpoint_detection)

    # Switch both models to evaluation mode
    encoder.eval()
    decoder.eval()

    # Per-camera crop boxes (x1, y1, x2, y2) for each vehicle/view
    cropped_coords = dict(Car1=dict(FV=(114, 110, 1176, 610),
                                         MVL=(343, 5, 1088, 411),
                                         MVR=(185, 5, 915, 425),
                                         RV=(186, 203, 1105, 630)),
                            Car2=dict(FV=(160, 272, 1030, 677),
                                         MVL=(327, 7, 1096, 410),
                                         MVR=(175, 4, 935, 404),
                                         RV=(285, 187, 1000, 572)),
                            Car3=dict(FV=(300, 20, 1620, 800),
                                        CAPTURE=(20, 80, 600, 400),
                                        TEST=(0,0,640,480)))
    cropped_coords = cropped_coords["Car3"]["CAPTURE"] # best result: crop with CAPTURE, detect, then scale back for display
    offset = cropped_coords[:4]
    # Size of the target crop region
    crop_width = offset[2] - offset[0]
    crop_height = offset[3] - offset[1]

    # Ratios from the network input size back to the crop size
    width_ratio = crop_width / args.input_width
    height_ratio = crop_height / args.input_height
    # Open the local camera device node
    cap = cv2.VideoCapture('/dev/video0')
    # cap = cv2.VideoCapture('/dev/video0')  # camera resolution is 640x480
    # Query the camera frame rate (shadows the fps used for the writers above)
    fps = cap.get(cv2.CAP_PROP_FPS)
    # print(f"camera fps: {fps} FPS")
    if cap.isOpened():
        print("摄像头已成功打开")
    else:
        print("无法打开摄像头")

    while True:
        # Grab one frame from the camera
        # NOTE(review): `ret` is never checked — `frame` will be None if the
        # read fails, and the .shape access below would raise. TODO confirm.
        ret, frame = cap.read()
        frame_width =frame.shape[1]
        frame_height =frame.shape[0]
        # print(frame_width, frame_height)
        # Convert the frame to RGB and then to a PIL image
        # image = cv2.resize(frame, (1920, 1280))
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        original_image = image
        image = Image.fromarray(image)  # wrap as a PIL Image
        image = image.crop(cropped_coords)
        cutimage = image
        to_tensor = T.ToTensor()
        resize = T.Resize((args.input_height, args.input_width),
                           interpolation=T.InterpolationMode.BICUBIC)
        resized_image = resize(image)
        # print(f"resized_image image size: {resized_image.size}")
        image_tensor = to_tensor(resized_image)
        input_image = image_tensor.unsqueeze(0)
        features =encoder(input_image)

        feed_width = args.input_width
        feed_height = args.input_height
    # Run detection through the decoder
        with torch.no_grad():
            yolo_outputs = decoder(features,img_dim=[feed_width, feed_height])["yolo_outputs"]

            yolo_outputs = non_max_suppression(yolo_outputs, conf_thres=args.detection_conf_thres, nms_thres=args.detection_nms_thres)  # non-max suppression

            image = np.array(original_image.copy())
            # image = np.array(resized_image.copy())
            if yolo_outputs is not None and len(yolo_outputs) > 0 and yolo_outputs[0] is not None:
                for ind in range(len(yolo_outputs[0])):
                    out_boxes = yolo_outputs[0][ind][:4]
                    # class_id = int(yolo_outputs[0][ind][5])  # class id
                    confidence = yolo_outputs[0][ind][4]  # confidence
                    # Take the class name directly from the detection row
                    class_name = yolo_outputs[0][ind][6] if len(
                        yolo_outputs[0][ind]) > 6 else "Unknown"  # assumes class sits at index 6 — TODO confirm
                    # Only draw boxes for classes 0 and 1
                    if class_name in [0, 1]:
                        # Map the box from network coords back onto the original frame:
                        # scale by crop/input ratio, then shift by the crop offset
                        min_x, min_y, max_x, max_y = out_boxes.numpy()
                        min_x *= width_ratio
                        min_y *= height_ratio
                        max_x *= width_ratio
                        max_y *= height_ratio

                        min_x += offset[0]
                        min_y += offset[1]
                        max_x += offset[0]
                        max_y += offset[1]
                        cv2.rectangle(image, (int(min_x), int(min_y)), (int(max_x), int(max_y)), (255, 0, 0), 1)
                        print(f"{class_name}: {confidence:.2f}")
                    # Draw the class and confidence label above the box
                        label = f"{class_name}: {confidence:.2f}"
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(image, label, (int(min_x), int(min_y) - 10), font, 0.6, (255, 0, 0), 1, cv2.LINE_AA)

            # Show the annotated frame
            result_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # back to OpenCV BGR

            # Display with OpenCV
            cv2.imshow("Detection Result", result_image)

            # Write each frame to the output videos (currently disabled)
            # video_writer.write(result_image)
            # original_image = cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR)
            # original_video_writer.write(original_image)

            # Press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release resources
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    # Script entry point: start the live-camera detection loop.
    inference()
