import json
import sys
import cv2
import numpy as np
import onnx_predict.onnx_utils as onnx_utils
import matplotlib.pyplot as plt
import os
import yolo_decode as yd

import evaluation_common.evaluation_util as evaluation_util


def sort_keypoints(keypoints):
    """
    Sort the keypoints by their position in the image.

    @param keypoints: 4行2列的关键点坐标，numpy数组，每行代表一个关键点的坐标(x,y)
    @return: 按左上、右上、右下、左下顺序排列的关键点坐标
    """
    index_left_up = np.argmin(np.sum(keypoints * np.array([1, 1]), axis=1))
    index_left_down = np.argmin(np.sum(keypoints * np.array([1, -1]), axis=1))
    index_right_down = np.argmax(np.sum(keypoints * np.array([1, 1]), axis=1))
    index_right_up = np.argmax(np.sum(keypoints * np.array([1, -1]), axis=1))
    keypoints_sorted = np.array(
        [
            keypoints[index_left_up],
            keypoints[index_right_up],
            keypoints[index_right_down],
            keypoints[index_left_down],
        ]
    )
    return keypoints_sorted


def draw_and_save(image_name, img_rgb, out, save_folder):
    """
    Draw detection results (bbox, confidence, keypoint quadrilateral) on an
    image and save it to disk.

    :param image_name: file name of the saved result (placed in save_folder)
    :param img_rgb: image to draw on (modified in place)
    :param out: post-processed network output, one row per detection:
        columns 0-3 = bbox (cx, cy, w, h), 4 = box confidence,
        (5,6), (8,9), (11,12), (14,15) = the four keypoint (x, y) pairs,
        7, 10, 13, 16 = per-keypoint confidences.
        Coordinates are relative to the 256x256 network input. May be None.
    :param save_folder: directory the result image is written into
    """
    h = img_rgb.shape[0]
    # NOTE(review): the image comes from cv2.imread (BGR) but is saved with
    # plt.imsave (expects RGB), so colors appear channel-swapped on disk.
    # Bounding-box / confidence-text color
    color_bbox = [255, 0, 0]
    # Keypoint-quadrilateral color
    color_polygon = [0, 255, 255]
    # Keypoint-circle color
    color_keypoint = [255, 255, 0]
    if out is not None:
        # Rescale only the coordinate columns from 256x256 network space to
        # the image size. The previous code scaled the whole array and then
        # un-scaled column 4 only, which silently corrupted the per-keypoint
        # confidence columns (7, 10, 13, 16) as well. Work on a copy so the
        # caller's array is untouched.
        out = out.copy()
        coord_cols = [0, 1, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15]
        out[:, coord_cols] = out[:, coord_cols] / 256 * h
        for rect in out:
            # Box confidence, printed at the top-left corner of the bbox
            cv2.putText(
                img_rgb,
                str(round(rect[4], 2)),
                (int(rect[0] - rect[2] // 2), int(rect[1] - rect[3] // 2)),
                1,
                5,
                color_bbox,
                5,
            )

            rect = list(map(int, rect))
            # Bounding box: (cx, cy, w, h) -> corner points
            cv2.rectangle(
                img_rgb,
                (rect[0] - rect[2] // 2, rect[1] - rect[3] // 2),
                (rect[0] + rect[2] // 2, rect[1] + rect[3] // 2),
                color_bbox,
                3,
            )
            # Collect the four keypoints, order them TL/TR/BR/BL and draw
            # the closed quadrilateral they form.
            keypoints_array = np.array(
                [
                    [rect[5], rect[6]],
                    [rect[8], rect[9]],
                    [rect[11], rect[12]],
                    [rect[14], rect[15]],
                ]
            ).astype(np.int32)
            keypoints_array = sort_keypoints(keypoints_array)
            cv2.polylines(img_rgb, [keypoints_array], True, color_polygon, 3)

            # Filled circle on each keypoint; radius scales with image size
            r = img_rgb.shape[0] // 100
            for x, y in keypoints_array:
                cv2.circle(img_rgb, (int(x), int(y)), r, color_keypoint, -1)

    plt.imsave(os.path.join(save_folder, image_name), img_rgb)


def main():
    """
    Evaluate keypoint/bbox predictions of a DLC model against labelled data.

    For every .png in img_folder: load the raw network output tensor from the
    matching .json in json_folder, decode it and apply NMS, compare the
    predictions with the label JSON (same base name as the image, stored
    beside it), draw a per-image visualisation into save_folder, and finally
    write aggregate metrics to save_folder/result.txt.
    """
    # Directory with the input images and their label files (same base name)
    img_folder = r"D:\desktopD\aidc_local\models\keypoint_detection\app_runs_result\test_barcode_img_for_pose\2D"
    # Directory with the raw DLC-model network outputs (JSON tensors)
    json_folder = r"D:\desktopD\aidc_local\models\keypoint_detection\app_runs_result\二维码推理结果\模型2\dlc未量化"
    # Directory the visualisation results and result.txt are written to
    save_folder = r"C:\Users\Administrator\Desktop\ultralytics-action\预测结果\dlc_now"
    os.makedirs(save_folder, exist_ok=True)

    # Accumulators for the evaluation statistics
    # number of missed (undetected) keypoints
    missed_keypoints_num = 0
    # number of falsely detected keypoints
    false_keypoints_num = 0
    # number of correctly detected keypoints
    correct_keypoints_num = 0
    # summed distance error between correct keypoints and their labels
    keypoint_distance_sum = 0
    # number of missed ground-truth bounding boxes
    missed_bbox_num = 0
    # number of images processed so far
    processed_img_num = 0
    # total number of keypoints in the labels
    ground_truth_keypoints_num = 0
    # total number of bounding boxes in the labels
    ground_truth_bbox_num = 0
    # number of correctly detected bounding boxes
    correct_bbox_num = 0
    # summed IoU between correct boxes and their ground-truth boxes
    bbox_iou_sum = 0

    image_name_list = os.listdir(img_folder)
    for image_name in image_name_list:
        if not image_name.endswith(".png"):
            continue
        print(image_name)
        # Load the raw network output stored as JSON
        json_name = image_name.replace(".png", ".json")
        with open(os.path.join(json_folder, json_name), "r") as f:
            tensor1 = json.load(f)
        wait_for_decode = np.array(tensor1)

        # Image used for drawing the visualisation
        img_rgb = cv2.imread(os.path.join(img_folder, image_name))

        # Label file has the same base name as the image and lives in the
        # same directory; it supplies the ground truth for the metrics.
        label_name = image_name.replace(".png", ".json")

        label_path = os.path.join(img_folder, label_name)
        # Extract the ground-truth keypoint coordinates from the label JSON
        ground_truth_keypoints_array = evaluation_util.extract_keypoint_array_from_json(
            label_path, ["QR", "QR_CODE_V3", "DM_CODE_V4"]
        )
        # Extract the ground-truth bounding boxes from the label JSON
        ground_truth_bbox_array = evaluation_util.extract_intact_bbox_from_json(
            label_path, ["QR", "QR_CODE_V3", "DM_CODE_V4"]
        )

        # # Alternative label classes (1D barcode evaluation):
        # ground_truth_keypoints_array = evaluation_util.extract_keypoint_array_from_json(label_path, ["CODE128"])
        # ground_truth_bbox_array = evaluation_util.extract_intact_bbox_from_json(label_path, ["CODE128"])

        # Accumulate ground-truth keypoint and bbox counts
        ground_truth_keypoints_num += ground_truth_keypoints_array.shape[0]
        ground_truth_bbox_num += ground_truth_bbox_array.shape[0]

        # Decode the raw network output into post-processable detections
        predicted_data = yd.yolo_decode(wait_for_decode, stride=16)

        # Threshold-filter and NMS the decoded detections; nms_pose returns a
        # per-image list, so take the first (only) entry.
        out = onnx_utils.nms_pose(prediction=predicted_data, conf_threshold=0.6, nms_threshold=0.8)
        out = out[0]

        if os.path.exists(label_path):
            if out is not None:
                # Predicted coordinates are relative to the 256x256 net input;
                # columns (5,6)/(8,9)/(11,12)/(14,15) hold the four keypoints.
                predicted_keypoints_array = np.vstack([out[:, 5:7], out[:, 8:10], out[:, 11:13], out[:, 14:16]])
                predict_bbox_array = np.vstack(out[:, 0:4])

            else:
                predicted_keypoints_array = np.array([])
                predict_bbox_array = np.array([])

            # Keypoint metrics; labels are rescaled from image size to the
            # 256x256 network space before comparison.
            (
                missed_keypoints_num_per_image,
                false_keypoints_num_per_image,
                correct_keypoints_num_per_image,
                sum_distance_per_image,
            ) = evaluation_util.keypoints_statistics(
                predicted_keypoints=predicted_keypoints_array,
                ground_truth_keypoints=ground_truth_keypoints_array / img_rgb.shape[0] * 256,
                distance_threshold=10.0,
            )

            # Bbox metrics (missed count, correct count, summed IoU)
            (
                missed_bbox_num_per_image,
                correct_bbox_num_per_image,
                iou_sum_per_image,
            ) = evaluation_util.bbox_statistics(
                predicted_bbox=predict_bbox_array,
                ground_truth_bbox=ground_truth_bbox_array / img_rgb.shape[0] * 256,
                iou_threshold=0.8,
            )

            # Fold the per-image numbers into the running totals
            missed_keypoints_num += missed_keypoints_num_per_image
            false_keypoints_num += false_keypoints_num_per_image
            correct_keypoints_num += correct_keypoints_num_per_image
            keypoint_distance_sum += sum_distance_per_image
            missed_bbox_num += missed_bbox_num_per_image
            bbox_iou_sum += iou_sum_per_image
            correct_bbox_num += correct_bbox_num_per_image
            processed_img_num += 1

            # Embed the per-image statistics in the saved file name.
            # NOTE(review): the last field uses the *cumulative*
            # bbox_iou_sum / correct_bbox_num, not the per-image values —
            # iou_sum_per_image / correct_bbox_num_per_image looks intended;
            # confirm before changing.
            save_image_name = "{}___{}_{}_{}_{}_{}.png".format(
                image_name.replace(".png", ""),
                missed_keypoints_num_per_image,
                false_keypoints_num_per_image,
                round(
                    sum_distance_per_image / (correct_keypoints_num_per_image + 1e-5),
                    3,
                ),
                missed_bbox_num_per_image,
                round(bbox_iou_sum / (correct_bbox_num + 1e-5), 3),
            )

            # Draw and save the visualisation for this image
            draw_and_save(save_image_name, img_rgb, out, save_folder)
        else:
            # No label available: save the visualisation under the raw name
            draw_and_save(image_name, img_rgb, out, save_folder)

    # NOTE(review): this condition is always true (counter starts at 0);
    # "processed_img_num > 0" was presumably intended.
    if processed_img_num >= 0:
        with open(os.path.join(save_folder, "result.txt"), "w") as f:
            f.write(
                "missed_keypoints_num: {}\n"
                "missed_keypoints_rate: {:.3f}\n"
                "false_keypoints_num: {}\n"
                "average_distance: {:.3f}\n"
                "missed_bbox_num: {}\n"
                "missed_bbox_rate: {:.3f}\n"
                "average_iou: {:.3f}\n"
                "processed_img_num: {}\n".format(
                    missed_keypoints_num,
                    missed_keypoints_num / (ground_truth_keypoints_num + 1e-5),
                    false_keypoints_num,
                    keypoint_distance_sum / (correct_keypoints_num + 1e-5),
                    missed_bbox_num,
                    missed_bbox_num / (ground_truth_bbox_num + 1e-5),
                    round(bbox_iou_sum / (correct_bbox_num + 1e-5), 3),
                    processed_img_num,
                )
            )

if __name__ == "__main__":
    main()
