from onnxruntime.transformers.models.gpt2.gpt2_parity import score

from omnidet.models.detection_decoder import YoloDecoder, YOLOLayer
import torch
from torchvision import transforms
from PIL import Image
import os
import cv2
import numpy as np
from omnidet.models.resnet import ResnetEncoder
import argparse
import json
from pathlib import Path
import torchvision.transforms as T
import yaml
from omnidet.utils import Tupperware
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from omnidet.train_utils.detection_utils import non_max_suppression
import onnx
import onnxruntime as ort


def inputs_to_device(self, inputs):
    """Move every value in *inputs* onto ``self.device``, mutating the dict in place."""
    for name in list(inputs):
        inputs[name] = inputs[name].to(self.device)

def printj(dic):
    """Pretty-print *dic* as JSON with a 4-space indent; returns None (print's result)."""
    text = json.dumps(dic, indent=4)
    return print(text)

def collect_args() -> argparse.Namespace:
    """Parse command-line arguments: a single ``--config`` option.

    Defaults to ``../data/params.yaml`` relative to this file.
    """
    default_config = Path(__file__).parent / "../data/params.yaml"
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help="Config file", type=str, default=default_config)
    return parser.parse_args()

def collect_tupperware() -> Tupperware:
    """Load the YAML file named by ``--config`` and wrap it in a Tupperware.

    Also pretty-prints the loaded parameters for visibility.
    """
    config = collect_args()
    # Use a context manager so the config file handle is closed
    # deterministically (the original leaked the open file object).
    with open(config.config, encoding="utf-8") as f:
        params = yaml.safe_load(f)
    args = Tupperware(params)
    printj(args)
    return args

def inference():
    args = collect_tupperware()
    input_folder = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/3.7'  # 输入图片文件夹
    output_folder = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/3.7_output'  # 输出图片文件夹
    output_folder_bk = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/test_png_output/对比/resized_pictures'
    args.model_checkpoint_encoder = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/encoder.pth'
    args.model_checkpoint_detection = '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18/detection.pth'
    args.dataset_dir = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/cropped_00000_FV.png'
    args.originimage = '/home/li/深度学习/鱼眼相机数据集相关资料/master/test/test_png/00037_FV.png'
    args.carpicture = '/home/li/深度学习/BSDpicture&video/BSD图片和视频/WIN_20250123_01_17_35_Pro (2).jpg'
    # 初始化 ONNX Runtime
    ort_session = ort.InferenceSession('/home/li/深度学习/鱼眼相机数据集相关资料/master/WoodScape-master/omnidet/models/fishcapture_detection_model.onnx')

    # 遍历输入文件夹中的所有图片
    for img_name in os.listdir(input_folder):
        img_path = os.path.join(input_folder, img_name)
        # 加载图片
        image = Image.open(img_path).convert("RGB")
        originalImage = image
        # image = image.crop(cropped_coords)
        cutimage = image
        to_tensor = T.ToTensor()
        resize = T.Resize((args.input_height, args.input_width),
                           interpolation=T.InterpolationMode.BICUBIC)

        image_tensor = to_tensor(image)
        input_image = image_tensor.unsqueeze(0)

        # 转换为 NumPy 数组并准备 ONNX 模型的输入
        input_image_np = input_image.numpy()

        # 使用 ONNX Runtime 进行推理
        ort_inputs = {ort_session.get_inputs()[0].name: input_image_np}
        ort_outs = ort_session.run(None, ort_inputs)

        # 获取目标检测结果
        yolo_outputs = ort_outs[0]
    # 通过解码器进行目标检测

        print('outputs.shape=',yolo_outputs.shape)
        # yolo_outputs = non_max_suppression(yolo_outputs, conf_thres=args.detection_conf_thres, nms_thres=args.detection_nms_thres)  # nms抑制
        boxes = []
        confidences = []
        class_ids = []
        score = []
        # 遍历 yolo_outputs 数组中的每一行 (每个检测框)
        # 处理 yolo_outputs，获取框和相关信息
        for output in yolo_outputs[0]:  # yolo_outputs[0] 是检测到的框
            out_boxes = output[:4]  # 前四个值是边界框坐标
            confidence = output[4]  # 第五个值是置信度
            score = confidence * output[5]
            class_id = int(output[6])

            # 将 xywh 转换为 (x1, y1, x2, y2)
            x_center, y_center, width, height = out_boxes
            x1 = x_center - width / 2
            y1 = y_center - height / 2
            x2 = x_center + width / 2
            y2 = y_center + height / 2

            # 将转换后的框加入列表
            boxes.append([x1, y1, x2, y2])
            confidences.append(score)  # 添加置信度
            class_ids.append(class_id)  # 添加类别ID

        # 转换为 NumPy 数组
        boxes = np.array(boxes)
        score = np.array(confidences)

        # 执行 NMS (非极大抑制)
        indices = cv2.dnn.NMSBoxes(boxes.tolist(), score.tolist(), score_threshold=0.25, nms_threshold=0.45)
        # 如果 indices 是一个元组，先转换为 NumPy 数组
        if isinstance(indices, tuple):
            indices = np.array(indices)
        # image = np.array(resized_image.copy())
        image = np.array(originalImage.copy())
        # 如果有通过NMS筛选的框，则绘制出来
        for idx in indices.flatten():
            bbox = boxes[idx]
            class_id = class_ids[idx]
            # 获取左上角和右下角坐标
            top_left = (int(bbox[0]), int(bbox[1]))
            bottom_right = (int(bbox[2]), int(bbox[3]))

            # 打印 xy 坐标
            print(f"Class ID: {class_id}, Top Left: {top_left}, Bottom Right: {bottom_right}")
            color = (0, 255, 0)  # 示例绿色框，你可以根据 class_id 设置不同颜色
            cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                              color, 2)
    # 保存检测结果图像
        result_image_path = os.path.join(output_folder, f"detected_{img_name}")
        imshowimage = image
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # cv2.imwrite(result_image_path, image)
        # print(f"Saved detection result to {result_image_path}")
        fig, ax = plt.subplots(figsize=(imshowimage.shape[1] / 100, imshowimage.shape[0] / 100))  # 按图像尺寸设置大小
        ax.imshow(imshowimage)
        plt.show()



# Script entry point: run the folder-wide inference demo.
if __name__ == '__main__':
    inference()