import numpy as np
import cv2
import json
import onnxruntime as ort
import json
import yolo_decode as yd

# def make_anchors(grid_cell_offset=0.5):
#     """Generate anchors from features."""
#     strides = [8, 16, 32]
#     anchor_points, stride_tensor = [], []
#     feat = [[4, 4], [2, 2], [1, 1]]
#     for i, stride in enumerate(strides):
#         h, w = feat[i]
#         sx = np.arange(w) + grid_cell_offset  # shift x
#         sy = np.arange(h) + grid_cell_offset  # shift y
#         sy, sx = np.meshgrid(sy, sx)
#         anchor_points.append(np.stack((sx, sy), -1).reshape(-1, 2))
#         stride_tensor.append(np.full((h * w, 1), stride))
#     return np.concatenate(anchor_points), np.concatenate(stride_tensor)


def xywh2xyxy(x):
    """
    Convert boxes from (center-x, center-y, width, height) to corner form
    (x1, y1, x2, y2).

    Operates on the first 4 entries of the last axis of an (m, n) YOLOv8
    prediction array; any remaining columns of each row are copied through
    unchanged.

    :param x: ndarray whose last axis starts with [x, y, w, h, ...]
    :return: ndarray of the same shape with the first 4 entries rewritten
             as [x1, y1, x2, y2]
    """
    y = np.copy(x)
    # Top-left corner = center - half extent.
    y[..., 0] = x[..., 0] - x[..., 2] / 2
    y[..., 1] = x[..., 1] - x[..., 3] / 2
    # Bottom-right corner = center + half extent.
    y[..., 2] = x[..., 0] + x[..., 2] / 2
    y[..., 3] = x[..., 1] + x[..., 3] / 2
    # BUG FIX: the original returned y.squeeze(), which collapsed a
    # single-box (1, 4) array to shape (4,) and broke the axis=1
    # concatenation in nms() whenever exactly one detection survived
    # thresholding.  squeeze() is a no-op for >= 2 boxes, so returning the
    # array with its shape intact is backward-compatible.
    return y


def xyxy2xywh(x):
    """
    Inverse of xywh2xyxy: convert boxes from corner form (x1, y1, x2, y2)
    back to center form (cx, cy, w, h) on the first 4 entries of the last
    axis; remaining columns are copied through unchanged.
    """
    out = np.copy(x)
    x1, y1 = x[..., 0], x[..., 1]
    x2, y2 = x[..., 2], x[..., 3]
    out[..., 0] = (x1 + x2) / 2  # center x
    out[..., 1] = (y1 + y2) / 2  # center y
    out[..., 2] = x2 - x1        # width
    out[..., 3] = y2 - y1        # height
    return out


def iou_batch(box: np.ndarray, box_array: np.ndarray):
    """
    Compute the IoU of one box against every box in an array in one pass.

    :param box: a single box; after squeezing, the first four entries are
                (cx, cy, w, h).  Extra trailing entries are ignored.
    :param box_array: (m, n) array of boxes whose first four columns are
                      (cx, cy, w, h)
    :return: (m,) array of IoU values of `box` against each row of `box_array`
    """
    ref = box.squeeze()
    cx, cy, w, h = ref[0], ref[1], ref[2], ref[3]
    acx, acy, aw, ah = box_array[:, 0], box_array[:, 1], box_array[:, 2], box_array[:, 3]

    # Intersection rectangle corners (extents go negative when disjoint).
    left = np.maximum(cx - w / 2, acx - aw / 2)
    top = np.maximum(cy - h / 2, acy - ah / 2)
    right = np.minimum(cx + w / 2, acx + aw / 2)
    bottom = np.minimum(cy + h / 2, acy + ah / 2)

    # Clamp negative extents to zero so non-overlapping boxes contribute no area.
    intersection = np.maximum(0, right - left) * np.maximum(0, bottom - top)

    # Union = sum of both areas minus the shared part.
    union = w * h + aw * ah - intersection

    return intersection / union


def nms(prediction, conf_thres=0.70, nms_thres=0.7):
    """
    Class-wise non-maximum suppression over raw YOLOv8 detection output.

    :param prediction: (batch, 4 + nc, num_boxes) array; channels 0-3 are
                       (cx, cy, w, h), the remaining nc channels class scores
    :param conf_thres: minimum best-class score for a box to be considered
    :param nms_thres: IoU threshold; of two boxes overlapping more than this,
                      the lower-confidence one is discarded
    :return: list with one entry per image — None, or an ndarray whose rows
             are [x1, y1, x2, y2, conf, conf, class_index]
    """
    num_classes = prediction.shape[1] - 4
    score_end = 4 + num_classes
    # Per image: boolean mask of boxes whose best class score clears the threshold.
    keep_mask = prediction[:, 4:score_end].max(1) > conf_thres
    output = [None] * len(prediction)

    for img_idx, raw in enumerate(prediction):
        # (channels, boxes) -> (boxes, channels), then drop low-score boxes.
        candidates = np.transpose(raw)[keep_mask[img_idx]]
        if not candidates.shape[0]:
            continue
        box, cls, _ = np.split(candidates, (4, score_end), axis=1)
        box = xywh2xyxy(box)
        conf = cls.max(1, keepdims=True)
        cls_idx = cls.argmax(1, keepdims=True)
        # conf appears twice to mimic the [obj_conf, cls_conf] column layout
        # of older YOLO outputs.
        detections = np.concatenate((box, conf, conf, cls_idx), 1)

        # Suppress overlaps independently for every class that occurs.
        for label in np.unique(detections[:, -1]):
            same_class = detections[detections[:, -1] == label]
            # Sort by confidence, highest first.
            same_class = same_class[(-same_class[:, 4]).argsort()]
            kept = []
            while same_class.shape[0]:
                # Keep the current best box ...
                kept.append(np.array([same_class[0]]))
                if len(same_class) == 1:
                    break
                # ... and drop every remaining box that overlaps it too much.
                overlaps = iou_batch(kept[-1], same_class[1:])
                same_class = same_class[1:][overlaps < nms_thres]
            kept = np.concatenate(kept)
            output[img_idx] = kept if output[img_idx] is None else np.concatenate((output[img_idx], kept))
    return output


def detect(image, model):
    """
    Run the detection model on an image and return boxes in the original
    image's pixel coordinates.

    The image is scaled so its longer side becomes 256, pasted into the
    top-left corner of a 256x256 canvas, collapsed to a single channel by
    averaging, normalised to [0, 1], and fed to the ONNX session under input
    name "input".  The raw output is rescaled to canvas pixels, passed
    through nms(), and mapped back to the input image's scale.

    :param image: (H, W, 3) image array
    :param model: ONNX Runtime session with a single input named "input"
    :return: list of per-box results [x1, y1, x2, y2, conf, conf, class_index]
             in input-image coordinates, or None when nothing is detected
    """
    height, width, _ = image.shape
    longest = max(height, width)
    scale = 256 / longest

    # Top-left-aligned letterbox into a 256x256 float canvas.
    canvas = np.zeros((256, 256, 3), np.float32)
    shrunk = cv2.resize(image, (0, 0), fx=scale, fy=scale)
    canvas[0:shrunk.shape[0], 0:shrunk.shape[1]] = shrunk

    # HWC -> 1,C,H,W; average the colour channels into one; scale to [0, 1].
    blob = np.array([canvas.transpose(2, 0, 1)])
    blob = np.mean(blob, axis=1, keepdims=True)
    blob /= 255

    raw = model.run(None, {"input": blob})[0]
    # Box channels come out normalised; bring them to 256x256 canvas pixels.
    raw[:, 0:4, :] = raw[:, 0:4, :] * 256

    result = nms(prediction=raw)[0]
    if result is None:
        print("No object is detected")
        return None
    # Undo the resize so coordinates refer to the original image.
    result[:, 0:4] /= scale
    return result.tolist()


def nms_pose(prediction: np.ndarray, conf_threshold: float = 0.50, nms_threshold: float = 0.5, ndim=3):
    """
    Post-process YOLOv8 pose output (4 keypoints per box) with class-wise
    non-maximum suppression.

    :param prediction: (batch, 4 + nc + 4*ndim, num_boxes) network output:
                       box (cx, cy, w, h), nc class scores, then 4 keypoints
                       of ndim values each
    :param conf_threshold: minimum best-class score for a box to survive
    :param nms_threshold: IoU threshold; of two boxes overlapping more than
                          this, the lower-confidence one is discarded
    :param ndim: number of values per keypoint (default 3: x, y, confidence)
    :return: list with one entry per image — None, or an ndarray whose rows
             are [cx, cy, w, h, conf, <4*ndim keypoint values>, class_index]
             in the network's normalised coordinates
    """
    # Number of classes: whatever remains after the box and the 4 keypoints.
    num_classes = prediction.shape[1] - 4 - ndim * 4
    score_end = 4 + num_classes
    # Per image: boolean mask of boxes whose best class score clears the threshold.
    keep_mask = prediction[:, 4:score_end].max(1) > conf_threshold
    output = [None] * len(prediction)

    for img_idx, raw in enumerate(prediction):
        # (channels, boxes) -> (boxes, channels), then drop low-score boxes.
        candidates = np.transpose(raw)[keep_mask[img_idx]]
        if not candidates.shape[0]:
            continue
        # Split each surviving row into box, class scores and keypoint data.
        box, cls, points = np.split(candidates, (4, score_end), axis=1)
        conf = cls.max(1, keepdims=True)
        cls_idx = cls.argmax(1, keepdims=True)
        # Rebuild rows with the winning class encoded as an index in the last column.
        detections = np.concatenate((box, conf, points, cls_idx), 1)

        # Suppress overlaps independently for every class that occurs.
        for label in np.unique(detections[:, -1]):
            same_class = detections[detections[:, -1] == label]
            # Sort by confidence, highest first.
            same_class = same_class[(-same_class[:, 4]).argsort()]
            kept = []
            while same_class.shape[0]:
                # Keep the current best box ...
                kept.append(np.array([same_class[0]]))
                if len(same_class) == 1:
                    break
                # ... and drop every remaining box whose IoU with it reaches
                # the threshold; loop until nothing (or one box) is left.
                overlaps = iou_batch(kept[-1], same_class[1:])
                same_class = same_class[1:][overlaps < nms_threshold]
            # Stack the survivors of this class, one row per box.
            kept = np.concatenate(kept)
            output[img_idx] = kept if output[img_idx] is None else np.concatenate((output[img_idx], kept))
    return output


def detect_pose(
    image: np.ndarray, model: ort.InferenceSession, conf_threshold: float, nms_threshold: float, need_decode=False
) -> list:
    """
    Run pose inference on an image and return filtered keypoint predictions.

    The image is scaled so its longer side becomes 256, pasted into the
    top-left corner of a 256x256 canvas, collapsed to a single channel by
    averaging, normalised to [0, 1], and fed to the ONNX session under input
    name "input".  The (optionally decoded) output is thresholded and passed
    through nms_pose(), then box and keypoint x/y coordinates are multiplied
    by the longer side of the input image to map them back from normalised
    space.

    :param image: (H, W, 3) image array
    :param model: ONNX Runtime inference session with one input named "input"
    :param conf_threshold: confidence threshold used to filter bounding boxes
    :param nms_threshold: IoU threshold for NMS; of two boxes overlapping more
                          than this, the lower-confidence one is discarded
    :param need_decode: whether the raw network output still needs decoding
                        via yd.yolo_decode (default False)
    :return: list of per-box result lists
             [x, y, w, h, conf, x1, y1, conf1, ..., x4, y4, conf4] with x/y
             values rescaled by the input image's longer side, or None when
             no object is detected
    """
    # Preprocess: paste the image, aspect ratio preserved, into the top-left
    # of a 256x256 canvas.
    height, width, _ = image.shape
    length = max(height, width)
    input_size = 256
    imageB = np.zeros((input_size, input_size, 3), np.float32)
    s = 256 / length

    resized_img = cv2.resize(image, (0, 0), fx=s, fy=s)
    h, w, c = resized_img.shape
    imageB[0:h, 0:w] = resized_img

    # HWC -> 1,C,H,W; average the colour channels into a single channel and
    # normalise values to [0, 1].
    imageB = np.array([imageB.transpose(2, 0, 1)])
    imageB = np.mean(imageB, axis=1, keepdims=True)
    imageB /= 255

    # Run inference to obtain the raw network output.
    output = model.run(None, {"input": imageB})
    # with open(r"C:\Users\Administrator\Desktop\ultralytics-action\a.json", "r") as f:
    #     output1 = [np.array(json.load(f))]

    if need_decode:
        # Decode the raw head output into final keypoint predictions.
        out = output[0]
        out = yd.yolo_decode(out, [16])
    else:
        out = output[0][0]
    # Uncomment the two lines below to dump the inference result as JSON for
    # the Kotlin side to read and process.
    # with open("yolo_pose_data.json", "w") as f:
    #     json.dump(out.tolist(), f)

    # # Alternative: move the keypoint decode into post-processing.
    # head1 = np.zeros((1, 1, 80, 80))
    # head2 = np.zeros((1, 1, 40, 40))
    # head3 = np.zeros((1, 1, 20, 20))
    # anchors, strides = (x for x in yd.make_anchors([head1, head2, head3], [8, 16, 32], 0.5))
    # anchors = anchors.transpose(1, 0)
    # strides = strides.transpose(1, 0)
    # ndim = 3
    # out[:, 7::ndim, :] = (out[:, 7::ndim, :] * 2.0 + (anchors[0] - 0.5)) * strides / 640
    # out[:, 8::ndim, :] = (out[:, 8::ndim, :] * 2.0 + (anchors[1] - 0.5)) * strides / 640

    # Threshold filtering + NMS; results are still in normalised coordinates.
    res = nms_pose(prediction=out, conf_threshold=conf_threshold, nms_threshold=nms_threshold)[0]

    # Map normalised coordinates back to the input image's scale.  The column
    # offsets (5:7, 8:10, 11:13, 14:16) scale the x/y of each of the 4
    # keypoints while skipping their confidence columns — this assumes a
    # single-class head so keypoints start at column 5 (TODO confirm nc == 1).
    if res is not None:
        res[:, 0:4] *= length
        res[:, 5:7] *= length
        res[:, 8:10] *= length
        res[:, 11:13] *= length
        res[:, 14:16] *= length
        res = res.tolist()
        return res
    else:
        print("No object is detected")
        return None
