import numpy as np
import cv2


def xywh2xyxy(x):
    """
    Convert boxes from center form (cx, cy, w, h) to corner form (x1, y1, x2, y2).

    Only the first four entries along the last axis are rewritten; any
    trailing columns (scores, class ids, keypoints, ...) are copied through
    untouched.  The result is squeezed, so a single-row input comes back
    as a 1-D array.
    """
    out = np.copy(x)
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    out[..., 0] = x[..., 0] - half_w  # left edge
    out[..., 1] = x[..., 1] - half_h  # top edge
    out[..., 2] = x[..., 0] + half_w  # right edge
    out[..., 3] = x[..., 1] + half_h  # bottom edge
    return out.squeeze()


def xyxy2xywh(x):
    """
    Inverse of ``xywh2xyxy``: convert corner boxes (x1, y1, x2, y2) back to
    center form (cx, cy, w, h).

    Only the first four entries along the last axis are rewritten; the input
    array itself is not modified.
    """
    out = np.copy(x)
    out[..., 0] = (x[..., 2] + x[..., 0]) / 2  # x center
    out[..., 1] = (x[..., 3] + x[..., 1]) / 2  # y center
    out[..., 2] = x[..., 2] - x[..., 0]        # width
    out[..., 3] = x[..., 3] - x[..., 1]        # height
    return out


def iou_batch(box: np.ndarray, box_array: np.ndarray):
    """
    Vectorized IoU of one reference box against every box in an array.

    :param box: single box, shape (1, n) or (n,); first four values are
        (center-x, center-y, w, h)
    :param box_array: m boxes, shape (m, n); first four columns likewise
    :return: ndarray of shape (m,) holding IoU(box, box_array[k]) for each k
    """
    ref = box.squeeze()
    rcx, rcy, rw, rh = ref[0], ref[1], ref[2], ref[3]
    cx, cy, w, h = box_array[:, 0], box_array[:, 1], box_array[:, 2], box_array[:, 3]

    # Corners of the intersection rectangle; a negative extent means the
    # boxes do not overlap and is clamped to zero below.
    left = np.maximum(rcx - rw / 2, cx - w / 2)
    top = np.maximum(rcy - rh / 2, cy - h / 2)
    right = np.minimum(rcx + rw / 2, cx + w / 2)
    bottom = np.minimum(rcy + rh / 2, cy + h / 2)

    intersection = np.maximum(0, right - left) * np.maximum(0, bottom - top)
    union = rw * rh + w * h - intersection

    return intersection / union


def nms(prediction, conf_thres=0.80, nms_thres=0.8):
    """
    Per-class greedy non-maximum suppression on raw network output.

    :param prediction: array of shape (batch, 4 + nc + 8, anchors); per
        anchor the channels are 4 box values (cx, cy, w, h), nc class
        scores, then 8 extra values (keypoints, carried through untouched)
    :param conf_thres: minimum best-class score for an anchor to be kept
    :param nms_thres: anchors with IoU >= this against a kept detection
        are suppressed (within the same class)
    :return: list with one entry per batch image: either None or an array
        of rows [cx, cy, w, h, conf, class_id, *points]
    """
    # Channel layout: 4 box values + nc class scores + 8 trailing values.
    nc = prediction.shape[1] - 4 - 8
    mi = 4 + nc
    # Per-image boolean mask over anchors: best class score above threshold.
    keep_mask = prediction[:, 4:mi].max(1) > conf_thres

    output = [None] * len(prediction)
    for i, pred in enumerate(prediction):
        # Transpose to (anchors, channels), drop low-confidence anchors.
        candidates = np.transpose(pred)[keep_mask[i]]
        if candidates.shape[0] == 0:
            continue

        box, cls, points = np.split(candidates, (4, mi), axis=1)
        conf = cls.max(1, keepdims=True)
        label = cls.argmax(1, keepdims=True)
        detections = np.concatenate((box, conf, label, points), 1)

        for c in np.unique(detections[:, 5]):
            # Candidates of this class, highest confidence first.
            same_cls = detections[detections[:, 5] == c]
            same_cls = same_cls[np.argsort(-same_cls[:, 4])]

            kept = []
            while same_cls.shape[0]:
                # Keep the best remaining box, drop everything overlapping it.
                kept.append(same_cls[0:1])
                rest = same_cls[1:]
                if rest.shape[0] == 0:
                    break
                overlap = iou_batch(kept[-1], rest)
                same_cls = rest[overlap < nms_thres]

            kept = np.concatenate(kept)
            output[i] = kept if output[i] is None else np.concatenate((output[i], kept))
    return output


def detect(image, model, input_size=256):
    """
    Run the detector on one BGR image and return NMS-filtered detections.

    The image is scaled to fit an ``input_size`` x ``input_size`` square
    (padded bottom/right with zeros), collapsed to a single grayscale
    channel, normalized to [0, 1], and fed to the model as input "input".

    :param image: H x W x 3 image array (as loaded by cv2.imread)
    :param model: onnxruntime-style session exposing ``run``
    :param input_size: side length of the square network input; default 256
        preserves the original behavior
    :return: list of detections [cx, cy, w, h, conf, class_id, *points]
        with box coordinates mapped back to original-image pixels, or
        None when nothing is detected
    """
    height, width, _ = image.shape
    scale = input_size / max(height, width)

    # Letterbox: resize so the longest side fits, pad the rest with zeros.
    canvas = np.zeros((input_size, input_size, 3), np.float32)
    resized = cv2.resize(image, (0, 0), fx=scale, fy=scale)
    h, w, _ = resized.shape
    canvas[0:h, 0:w] = resized

    # Build an NCHW batch of one, collapse channels to grayscale (mean over
    # the channel axis), and normalize to [0, 1].
    blob = np.array([canvas.transpose(2, 0, 1)])
    blob = np.mean(blob, axis=1, keepdims=True)
    blob /= 255

    out = model.run(None, {"input": blob})[0]
    # Box channels are multiplied by the input size — presumably the model
    # emits them normalized to [0, 1]; verify against the exporting model.
    out[:, 0:4, :] = out[:, 0:4, :] * input_size

    res = nms(prediction=out)[0]
    if res is None:
        print("No object is detected")
        return None

    # Map boxes from network-input pixels back to original-image pixels.
    # NOTE(review): keypoint columns (index 6+) are NOT rescaled here —
    # confirm whether the model emits them in input-pixel units.
    res[:, 0:4] /= scale
    return res.tolist()
