from onnxruntime.transformers.models.gpt2.gpt2_parity import score

from omnidet.models.detection_decoder import YoloDecoder, YOLOLayer
import torch
from torchvision import transforms
from PIL import Image
import os
import cv2
import numpy as np
from omnidet.models.resnet import ResnetEncoder
import argparse
import json
from pathlib import Path
import torchvision.transforms as T
import yaml
from omnidet.utils import Tupperware
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from omnidet.train_utils.detection_utils import non_max_suppression


def inputs_to_device(self, inputs):
    """Move every tensor in *inputs* to ``self.device``, mutating the dict in place."""
    for key in inputs:
        inputs[key] = inputs[key].to(self.device)

def printj(dic):
    """Pretty-print *dic* as 4-space-indented JSON; returns None (like print)."""
    print(json.dumps(dic, indent=4))

def collect_args() -> argparse.Namespace:
    """Set command line arguments"""
    default_cfg = Path(__file__).parent / "../data/params.yaml"
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Config file", type=str, default=default_cfg)
    return parser.parse_args()

def collect_tupperware() -> Tupperware:
    """Load the YAML config named on the command line into a Tupperware object.

    Parses ``--config`` via :func:`collect_args`, reads the YAML file, wraps
    the resulting dict in ``Tupperware`` for attribute-style access, and
    pretty-prints it for the log.

    Returns:
        Tupperware: attribute-style wrapper around the parsed YAML params.
    """
    config = collect_args()
    # Use a context manager so the config file handle is always closed
    # (the original left the file object from open() dangling).
    with open(config.config) as f:
        params = yaml.safe_load(f)
    args = Tupperware(params)
    printj(args)
    return args

def detect_yolo_output(yolo_outputs, img_dim, original_dim, conf_thresh=0.5, iou_thresh=0.4):
    """Post-process raw YOLO outputs into per-image boxes, labels and scores.

    Args:
        yolo_outputs: iterable of per-image tensors of shape
            ``(num_boxes, 5 + num_classes)``; each row is laid out as
            ``[x_center, y_center, w, h, objectness, class scores...]``.
        img_dim: ``(height, width)`` of the network input space the raw
            boxes are expressed in.
        original_dim: ``(height, width)`` of the original image the boxes
            should be rescaled to.
        conf_thresh: minimum objectness (and best-class score) to keep a box.
        iou_thresh: kept for API compatibility; no NMS is performed here.

    Returns:
        Tuple of three parallel lists ``(boxes, scores, labels)`` with one
        tensor per input image: ``boxes[i]`` has shape ``(k, 4)`` in
        ``[x1, y1, x2, y2]`` original-image coordinates, ``scores[i]`` holds
        ``objectness * class_score`` and ``labels[i]`` the class indices.
    """
    boxes, scores, labels = [], [], []

    # Rescale factors from network-input space back to the original image.
    scale_x = original_dim[1] / img_dim[1]
    scale_y = original_dim[0] / img_dim[0]

    for output in yolo_outputs:
        boxes_batch, scores_batch, labels_batch = [], [], []

        for det in output:
            box = det[:4]           # x_center, y_center, w, h
            conf = det[4]           # objectness confidence
            class_scores = det[5:]  # per-class scores

            # Drop boxes with low objectness.
            if conf < conf_thresh:
                continue

            # Best class score and its label.
            score, label = torch.max(class_scores, dim=-1)

            # Drop boxes whose best class score is below the threshold.
            if score < conf_thresh:
                continue

            # Convert center/size to corner format and rescale to the
            # original image resolution in one step.
            x_center, y_center, w, h = box
            x1 = (x_center - w / 2) * scale_x
            y1 = (y_center - h / 2) * scale_y
            x2 = (x_center + w / 2) * scale_x
            y2 = (y_center + h / 2) * scale_y

            boxes_batch.append([float(x1), float(y1), float(x2), float(y2)])
            # Final score is objectness times the best class score.
            scores_batch.append(float(score * conf))
            labels_batch.append(int(label))

        # BUG FIX: the original appended only once, AFTER this loop, so every
        # image except the last was silently dropped; append per image here.
        boxes.append(torch.tensor(boxes_batch))
        scores.append(torch.tensor(scores_batch))
        labels.append(torch.tensor(labels_batch))

    return boxes, scores, labels

def inference():
    """Run OmniDet YOLO detection on every image in a hard-coded input folder.

    Loads a ResNet-18 encoder and a YOLO decoder from checkpoint files, feeds
    each image through the network, applies OpenCV NMS to the raw detections,
    draws the surviving boxes and displays the result with matplotlib.

    NOTE(review): all input/output/checkpoint paths below are absolute paths
    on the author's machine; adjust before running elsewhere.
    """
    args = collect_tupperware()
    input_folder = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/3.7'  # input image folder
    output_folder = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/3.7_output'  # output image folder
    output_folder_bk = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/test_png_output/对比/resized_pictures'
    args.model_checkpoint_encoder = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/encoder.pth'
    args.model_checkpoint_detection = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/detection.pth'
    args.dataset_dir = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/cropped_00000_FV.png'
    args.originimage = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/test_png/00037_FV.png'
    args.carpicture = '/home/li/深度学习/BSDpicture&video/BSD图片和视频/WIN_20250123_01_17_35_Pro (2).jpg'
    # Initialize model and device (CPU-only inference).
    device = torch.device("cpu")
    # --- Init Detection model ---
    encoder = ResnetEncoder(num_layers=18, pretrained=False).to(device)  # 18-layer ResNet backbone
    decoder = YoloDecoder(encoder.num_ch_enc, args).to(device)

    checkpoint_encoder = torch.load(args.model_checkpoint_encoder, map_location=device, weights_only=True)
    checkpoint_detection = torch.load(args.model_checkpoint_detection, map_location=device, weights_only=True)

    # Current encoder state dict, used to filter checkpoint keys below.
    model_dict_encoder = encoder.state_dict()

    # Adaptive key loading: keep only checkpoint weights whose keys exist in this model.
    pretrained_dict_encoder = {k: v for k, v in checkpoint_encoder.items() if k in model_dict_encoder}

    # Merge the matching pretrained weights into the encoder.
    model_dict_encoder.update(pretrained_dict_encoder)
    encoder.load_state_dict(model_dict_encoder)
    decoder.load_state_dict(checkpoint_detection)

    # Switch both networks to evaluation mode.
    encoder.eval()
    decoder.eval()

    # Per-camera crop regions (x1, y1, x2, y2) for each car/view.
    cropped_coords = dict(Car1=dict(FV=(114, 110, 1176, 610),
                                         MVL=(343, 5, 1088, 411),
                                         MVR=(185, 5, 915, 425),
                                         RV=(186, 203, 1105, 630)),
                            Car2=dict(FV=(160, 272, 1030, 677),
                                         MVL=(327, 7, 1096, 410),
                                         MVR=(175, 4, 935, 404),
                                         RV=(285, 187, 1000, 572)),
                            Car3=dict(FV=(300, 20, 1620, 800),
                                        TEST=(200, 20, 1000, 600)))
    cropped_coords = cropped_coords["Car3"]["TEST"]
    offset = cropped_coords[:4]
    # Size of the target crop region.
    crop_width = offset[2] - offset[0]
    crop_height = offset[3] - offset[1]

    # Crop-to-network-input scale ratios.
    # NOTE(review): width_ratio/height_ratio are computed but never used below.
    width_ratio = crop_width / args.input_width
    height_ratio = crop_height / args.input_height

    # (disabled) show all images in one matplotlib window
    # fig, axs = plt.subplots(nrows=len(os.listdir(input_folder)), figsize=(15, len(os.listdir(input_folder)) * 5))
    # Iterate over every image in the input folder.
    for img_name in os.listdir(input_folder):
        img_path = os.path.join(input_folder, img_name)
        # Load the image.
        image = Image.open(img_path).convert("RGB")
        originalImage = image
        # image = image.crop(cropped_coords)
        cutimage = image
        to_tensor = T.ToTensor()
        resize = T.Resize((args.input_height, args.input_width),
                           interpolation=T.InterpolationMode.BICUBIC)
        # resized_image = resize(image)
        # resized_image_bk = np.array(resized_image)
        # resized_image_bk = cv2.cvtColor(resized_image_bk, cv2.COLOR_RGB2BGR)
        # result_resized = os.path.join(output_folder_bk, f"detected_{img_name}")
        # cv2.imwrite(result_resized, resized_image_bk)
        # print(f"resized_image image size: {resized_image.size}")
        # NOTE(review): the resize above is commented out, so the full-size
        # image is fed to the encoder as-is — confirm this is intentional.
        image_tensor = to_tensor(image)
        # input_image = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image_tensor)
        input_image = image_tensor.unsqueeze(0)

        features =encoder(input_image)

        feed_width = args.input_width
        feed_height = args.input_height
    # Run object detection through the decoder.
        with torch.no_grad():
            yolo_outputs = decoder(features,img_dim=[feed_width, feed_height])["yolo_outputs"]
            print('outputs.shape=',yolo_outputs.shape)
            # yolo_outputs = non_max_suppression(yolo_outputs, conf_thres=args.detection_conf_thres, nms_thres=args.detection_nms_thres)  # NMS suppression (disabled)
            boxes = []
            confidences = []
            class_ids = []
            # NOTE(review): this local `score` shadows the module-level
            # `score` import from onnxruntime (line 1) and is reassigned below.
            score = []
            # Walk each row (one detection box) of the yolo_outputs array.
            # Process yolo_outputs to extract boxes and related info.
            for output in yolo_outputs[0]:  # yolo_outputs[0] holds this image's detected boxes
                out_boxes = output[:4]  # first four values are box coordinates
                confidence = output[4]  # fifth value is objectness confidence
                # NOTE(review): assumes each row is laid out as
                # [x, y, w, h, conf, class_score, class_id] — confirm against
                # YoloDecoder's output format.
                score = confidence * output[5]
                class_id = int(output[6])

                # Convert xywh to corner format (x1, y1, x2, y2).
                x_center, y_center, width, height = out_boxes
                x1 = x_center - width / 2
                y1 = y_center - height / 2
                x2 = x_center + width / 2
                y2 = y_center + height / 2

                # Collect the converted box.
                boxes.append([x1, y1, x2, y2])
                confidences.append(score)  # final per-box score
                class_ids.append(class_id)  # class ID

            # Convert to NumPy arrays.
            boxes = np.array(boxes)
            score = np.array(confidences)

            # Run NMS (non-maximum suppression).
            # NOTE(review): cv2.dnn.NMSBoxes expects boxes as (x, y, w, h),
            # but corner-format (x1, y1, x2, y2) boxes are passed here —
            # verify whether this is a bug.
            indices = cv2.dnn.NMSBoxes(boxes.tolist(), score.tolist(), score_threshold=0.25, nms_threshold=0.45)
            # If indices came back as a tuple, convert it to a NumPy array first.
            if isinstance(indices, tuple):
                indices = np.array(indices)
            # image = np.array(resized_image.copy())
            image = np.array(originalImage.copy())
            # Draw any boxes that survived NMS.
            for idx in indices.flatten():
                bbox = boxes[idx]
                class_id = class_ids[idx]
                color = (0, 255, 0)  # example green box; vary by class_id if desired
                cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                              color, 2)
    # Save / display the detection result image.
            result_image_path = os.path.join(output_folder, f"detected_{img_name}")
            imshowimage = image
            # NOTE(review): the BGR->RGB converted `image` below is unused —
            # only `imshowimage` is displayed and the imwrite is disabled.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # cv2.imwrite(result_image_path, image)
            # print(f"Saved detection result to {result_image_path}")
            fig, ax = plt.subplots(figsize=(imshowimage.shape[1] / 100, imshowimage.shape[0] / 100))  # size figure to the image
            ax.imshow(imshowimage)
            plt.show()








        # # The following is an alternative, disabled xywh post-processing path.
        # # Assume the original image size is 966x1280
        # original_dim = originimage.size
        # # Get the predicted boxes, scores, labels
        # # boxes, scores, labels = detect_yolo_output(yolo_outputs, img_dim ,  original_dim)
        # boxes, scores, labels = detect_yolo_output(yolo_outputs, img_dim ,  img_dim) #288x544
        #
        #
        # # image = np.array(image.copy())
        # image = np.array(resized_image.copy())
        # # Check the loaded image size (via shape)
        # print(f"OutPut image size: {image.shape[1]}x{image.shape[0]}")
        # # Draw detection boxes and class labels
        # for box, score, label in zip(boxes[0], scores[0], labels[0]):
        #     # Draw the bounding box
        #     x1, y1, x2, y2 = box
        #     x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        #     print(f"Box2 {box}: [x1, y1, x2, y2] = [{x1}, {y1}, {x2}, {y2}]")
        #
        #     # When drawing, make sure the coordinates are valid and inside the image
        #     x1, y1 = max(0, x1), max(0, y1)
        #     x2, y2 = min(image.shape[1], x2), min(image.shape[0], y2)
        #
        #     cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        #     # Add the label and score above the box
        #     # label_str = f"Label: {label.item()} Score: {score.item():.2f}"
        #     # cv2.putText(image, label_str, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        #
        #
        # image_rgb = image
        # # Check the loaded image size
        # print(f"OutPut image size: {image_rgb.shape[1]}x{image_rgb.shape[0]}")
        #
        # fig, ax = plt.subplots(figsize=(image_rgb.shape[1] / 100, image_rgb.shape[0] / 100))  # size figure to the image
        # ax.imshow(image_rgb)
        # ax.axis('off')  # hide the axes
        # # # Save the image to a file
        # # save_path = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/saved_image.png'  # change filename/path as needed
        # # plt.savefig(save_path, bbox_inches='tight', pad_inches=0, dpi=300)
        # plt.show()


if __name__ == '__main__':
    # Script entry point: run detection inference over the configured folder.
    inference()