import json
import sys
import cv2
import numpy as np
import onnx_predict.onnx_utils as onnx_utils
import matplotlib.pyplot as plt
import os


def make_anchors(feats, stride, grid_cell_offset=0.5):
    """
    Generate anchor points and per-anchor strides from detection-head feature maps.

    :param feats: list of feature maps, each of shape (batch, channels, height, width)
    :param stride: iterable of stride values, one per feature map in ``feats``
    :param grid_cell_offset: offset added to each grid coordinate (0.5 = cell center)
    :return: tuple of (anchor_points, stride_tensor) where anchor_points has shape
             (sum(h*w), 2) holding (x, y) pairs and stride_tensor has shape (sum(h*w), 1)
    """
    anchor_points, stride_tensor = [], []
    # NOTE: the original code reused the name `stride` for the loop variable,
    # shadowing the parameter; use a distinct name and zip instead of enumerate.
    for feat, s in zip(feats, stride):
        _, _, h, w = feat.shape
        sx = np.arange(w) + grid_cell_offset  # shift x
        sy = np.arange(h) + grid_cell_offset  # shift y
        grid_y, grid_x = np.meshgrid(sy, sx, indexing="ij")
        # Row-major (y, x) traversal flattened into (x, y) coordinate pairs.
        points = np.stack((grid_x, grid_y), -1).reshape(-1, 2)
        anchor_points.append(points)
        stride_tensor.append(np.full((h * w, 1), s))
    return np.concatenate(anchor_points), np.concatenate(stride_tensor)


def softmax_ndarray(arr, axis):
    """
    Numerically stable softmax over a chosen axis of an ndarray.

    Parameters:
    arr (np.ndarray): input array (typically 4-D here).
    axis (int): axis along which probabilities should sum to 1.

    Returns:
    np.ndarray: array of the same shape whose values along ``axis`` sum to 1.
    """
    # Subtract the per-slice maximum first so np.exp never overflows.
    shifted = arr - np.max(arr, axis=axis, keepdims=True)
    exponentials = np.exp(shifted)
    # Normalize so each slice along `axis` forms a probability distribution.
    return exponentials / np.sum(exponentials, axis=axis, keepdims=True)


def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    negative_exp = np.exp(np.negative(x))
    return np.divide(1.0, 1.0 + negative_exp)


def dfl(bbox, point_num=2):
    """Distribution Focal Loss (DFL) decoding.

    Turns the head's per-side distance distributions into expected distances
    by taking a softmax over the reg_max bins and computing the weighted sum
    with the bin indices 0..reg_max-1.
    """
    reg_max = 16
    batch, _, num_anchors = bbox.shape  # batch, channels, anchors
    # (batch, 2*point_num, reg_max, anchors) -> bins on axis 1.
    dist = bbox.reshape(batch, 2 * point_num, reg_max, num_anchors).transpose(0, 2, 1, 3)
    # Stable softmax over the reg_max bin axis (inlined from softmax_ndarray).
    shifted = dist - np.max(dist, axis=1, keepdims=True)
    exponentials = np.exp(shifted)
    probs = exponentials / np.sum(exponentials, axis=1, keepdims=True)
    # Expected value of the distribution: sum_i i * p_i.
    bins = np.arange(reg_max).reshape(1, reg_max, 1, 1)
    expected = np.sum(bins * probs, axis=1, keepdims=True)
    return expected.reshape(batch, 2 * point_num, num_anchors)


def dist2bbox(distance, anchor_points, xywh=True, dim=1):
    """Convert (left, top, right, bottom) distances from anchor points to boxes.

    :param distance: distances stacked as [lt, rb] along axis 1
    :param anchor_points: anchor (x, y) coordinates, broadcast against distance
    :param xywh: True -> return (center_x, center_y, w, h); False -> (x1, y1, x2, y2)
    :param dim: axis along which the output components are concatenated
    """
    left_top, right_bottom = np.split(distance, 2, axis=1)
    top_left = anchor_points - left_top
    bottom_right = anchor_points + right_bottom
    if not xywh:
        return np.concatenate((top_left, bottom_right), dim)  # xyxy bbox
    center = (top_left + bottom_right) / 2
    size = bottom_right - top_left
    return np.concatenate((center, size), dim)  # xywh bbox


def yolo_decode(wait_for_decode, stride):
    """Decode a raw YOLO pose-head output into boxes, class scores and keypoints.

    :param wait_for_decode: raw head output; make_anchors reads a 4-D
        (batch, channels, h, w) shape from it — TODO confirm upstream shape
    :param stride: iterable of stride values passed through to make_anchors
    :return: array concatenating [xywh box (4), class scores (nc), keypoints
        (ndim * 4)] along axis 1, flattened over all anchors
    """
    # Identity transpose — a no-op; presumably left as a placeholder for a
    # layout change. NOTE(review): could be removed, kept for byte-compat.
    detect_pose_data = wait_for_decode.transpose(0, 1, 2, 3)
    # Each keypoint is (x, y) or (x, y, score), i.e. ndim = 2 or ndim = 3.
    ndim = 3
    reg_max = 16
    # Channel layout assumed: [reg_max*4 box bins | nc class logits | ndim*4 keypoints].
    nc = detect_pose_data.shape[1] - reg_max * 4 - ndim * 4
    detect_data = detect_pose_data[:, 0 : reg_max * 4 + nc, :].reshape(detect_pose_data.shape[0], reg_max * 4 + nc, -1)
    pose_data = detect_pose_data[:, reg_max * 4 + nc : reg_max * 4 + nc + ndim * 4, :].reshape(
        detect_pose_data.shape[0], ndim * 4, -1
    )

    anchors, strides = (x for x in make_anchors([detect_pose_data], stride, 0.5))
    anchors = anchors.transpose(1, 0)  # (2, num_anchors): row 0 = x, row 1 = y
    strides = strides.transpose(1, 0)
    bbox = detect_data[:, 0 : reg_max * 4, :]
    cls = detect_data[:, reg_max * 4 :, :]
    dfled_bbox = dfl(bbox)
    # Scale to pixels then divide by 256 — presumably the model input size is
    # 256, normalizing boxes to [0, 1]; TODO confirm against the exporter.
    dbox = dist2bbox(dfled_bbox, anchors, xywh=True, dim=1) * strides / 256

    cls = sigmoid(cls)
    detect_data = np.concatenate((dbox, cls), axis=1)

    # Keypoints are decoded in place: x/y are offsets from the anchor center
    # (the *2.0 and -0.5 follow the YOLO pose head convention), score -> sigmoid.
    pose_data[:, 0::ndim, :] = (pose_data[:, 0::ndim, :] * 2.0 + (anchors[0] - 0.5)) * strides / 256
    pose_data[:, 1::ndim, :] = (pose_data[:, 1::ndim, :] * 2.0 + (anchors[1] - 0.5)) * strides / 256
    pose_data[:, 2::ndim, :] = sigmoid(pose_data[:, 2::ndim, :])

    predicted_data = np.concatenate((detect_data, pose_data), axis=1)
    return predicted_data


if __name__ == "__main__":
    # Dummy feature maps for a 640x640 input (80/40/20 grids at strides 8/16/32);
    # only the spatial dims matter to make_anchors.
    head1 = np.zeros((1, 1, 80, 80))
    head2 = np.zeros((1, 1, 40, 40))
    head3 = np.zeros((1, 1, 20, 20))
    # Unpack the tuple directly — the original wrapped it in a pointless
    # generator expression: (x for x in make_anchors(...)).
    anchors, strides = make_anchors([head1, head2, head3], [8, 16, 32], 0.5)
    anchors = anchors.transpose(1, 0)  # (2, N): row 0 = x coords, row 1 = y coords
    strides = strides.transpose(1, 0)
    anchors_1 = anchors[0]
    anchors_2 = anchors[1]
    strides = strides[0]
    print(anchors_1.shape)
    print(anchors_2.shape)
    # Dump as C-style float literals ("%.1ff,") for embedding in source code.
    np.savetxt("anchors_1.txt", anchors_1, fmt="%.1f", newline="f,")
    np.savetxt("anchors_2.txt", anchors_2, fmt="%.1f", newline="f,")
    np.savetxt("strides.txt", strides, fmt="%.1f", newline="f,")
