import cv2
import numpy as np
import de
import os
from .base_tpu import TPU


class PredictYolov5(TPU):
    """YOLOv5 inference wrapper for the TPU engine.

    Pre-processes an OpenCV BGR frame into the engine's planar-RGB input,
    runs inference, and post-processes the raw output into either detection
    boxes (when thresholds are supplied) or a classification label.
    """

    def __init__(self):
        super().__init__()
        # Report which module is running and where it lives on disk
        # (self.pwd is provided by the TPU base class).
        file_name = os.path.basename(__file__)
        print(f"{file_name}.py path : {self.pwd}")

    def predict(self, img, imgsz, conf=None, iou=None, out_size=(640, 480)):
        """Run inference on a single frame.

        Args:
            img: BGR image (H x W x C) as produced by OpenCV capture.
            imgsz: (width, height) the model expects; the frame is resized
                to this before inference.
            conf: objectness-confidence threshold. Supplying `conf`/`iou`
                selects the detection path; when both are None the output
                is treated as classification logits instead.
            iou: IoU threshold for non-maximum suppression.
            out_size: (width, height) of the coordinate space the returned
                boxes are scaled and clamped to. Defaults to the historical
                hard-coded 640x480 display size.

        Returns:
            Detection path: list of [cx, cy, w, h, score, label] boxes in
            `out_size` coordinates.
            Classification path: the argmax label of `output[0]`.
        """
        data = self.cap_processing(img, imgsz)
        output = self.engine.predict(data)

        # Explicit None checks: the old truthiness test (`if conf or iou`)
        # wrongly fell through to classification for a legitimate 0.0
        # threshold.
        if conf is None and iou is None:
            # Classification: output[0] is assumed to be the logits vector.
            return np.argmax(output[0])

        out_w, out_h = out_size
        results_box = []
        for o in self.outbox_processing(output, conf, iou):
            # Scale corners from model-input space back to output space.
            x1 = int(o[0] * out_w / imgsz[0])
            y1 = int(o[1] * out_h / imgsz[1])
            x2 = int(o[2] * out_w / imgsz[0])
            y2 = int(o[3] * out_h / imgsz[1])

            score = float(o[4])
            label = int(o[5])

            # Clamp to the output frame.
            x1 = max(x1, 0)
            y1 = max(y1, 0)
            x2 = min(x2, out_w)
            y2 = min(y2, out_h)

            w = x2 - x1
            h = y2 - y1
            # Box centre; the historical "-1" offset is kept as-is.
            x = x1 + w / 2 - 1
            y = y1 + h / 2 - 1
            results_box.append([int(x), int(y), int(w), int(h), score, label])
        return results_box

    def outbox_processing(self, org_box, conf, input_iou):
        """Filter raw detection output by confidence and per-class NMS.

        Args:
            org_box: raw engine output, shape (1, N, 5 + num_classes) with
                rows [cx, cy, w, h, obj_conf, cls_0, cls_1, ...] —
                assumed from the indexing below; confirm against the model.
            conf: minimum objectness confidence to keep a box.
            input_iou: IoU threshold passed to `nms_iou`.

        Returns:
            np.ndarray of kept boxes [x1, y1, x2, y2, score, label].
        """
        output = []

        # Drop the leading batch dimension.
        org_box = np.squeeze(org_box)

        # Keep only boxes whose objectness exceeds the threshold.
        # (Renamed from `conf` — the old code shadowed its own parameter.)
        keep = org_box[..., 4] > conf
        box = org_box[keep]

        # Per-box predicted class = argmax over the class-score columns.
        cls = [int(np.argmax(row)) for row in box[..., 5:]]

        # NMS is applied independently for each class present.
        for curr_cls in set(cls):
            curr_cls_box = []
            for j, c in enumerate(cls):
                if c == curr_cls:
                    # Reuse column 5 to carry the class index forward.
                    box[j][5] = curr_cls
                    curr_cls_box.append(box[j][:6])

            curr_cls_box = self.xywh2xyxy(np.array(curr_cls_box))
            # Keep only the indices surviving non-maximum suppression.
            for k in self.nms_iou(curr_cls_box, input_iou):
                output.append(curr_cls_box[k])
        return np.array(output)

    def cap_processing(self, image, imgsz, C=3):
        """Convert a BGR frame into the engine's planar-RGB input list.

        Args:
            image: BGR image (H x W x C) from OpenCV.
            imgsz: (width, height) to resize to.
            C: kept for interface compatibility; not used (the channel
                count comes from the image itself).

        Returns:
            [(pixel_format, shape, tensor)] — one entry per sample;
            multiple entries could be supplied for batched inference.
        """
        fmt = de.PixelFormat.DE_PIX_FMT_RGB888_PLANE
        image = cv2.resize(image, (imgsz[0], imgsz[1]))
        # BGR -> RGB and HWC -> CHW.
        image = image[:, :, ::-1].transpose(2, 0, 1)
        # Add the batch dimension: shape becomes (1, C, H, W).
        image = np.expand_dims(image, axis=0)
        shape = tuple(image.shape)
        return [(fmt, shape, image)]

    def xywh2xyxy(self, x):
        """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2).

        Only the first four columns are transformed; any extra columns
        (score, class) are copied through unchanged.
        """
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2
        y[:, 1] = x[:, 1] - x[:, 3] / 2
        y[:, 2] = x[:, 0] + x[:, 2] / 2
        y[:, 3] = x[:, 1] + x[:, 3] / 2
        return y

    def nms_iou(self, box, input_iou):
        """Greedy non-maximum suppression on corner-encoded boxes.

        Args:
            box: array of rows [x1, y1, x2, y2, score, label] as produced
                by `xywh2xyxy` in `outbox_processing`.
            input_iou: boxes overlapping a kept box by more than this IoU
                are suppressed.

        Returns:
            List of row indices (highest score first) surviving NMS.
        """
        res = []
        # The input is already corner-encoded. The previous code re-derived
        # corners as if columns 0-3 were (cx, cy, w, h) — including a
        # `y2 = y + w / 2` typo — which corrupted the IoU geometry.
        x1 = box[:, 0]
        y1 = box[:, 1]
        x2 = box[:, 2]
        y2 = box[:, 3]
        # Column 4 is the objectness score; the previous code sorted on
        # column 5, which holds the (constant per call) class id.
        scores = box[:, 4]
        # Pixel-style areas with the +1 inclusive-coordinate convention.
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        # Visit boxes from highest to lowest score.
        index = scores.argsort()[::-1]
        while index.size > 0:
            i = index[0]
            res.append(i)
            # Intersection of the current box with all remaining boxes.
            x11 = np.maximum(x1[i], x1[index[1:]])
            y11 = np.maximum(y1[i], y1[index[1:]])
            x22 = np.minimum(x2[i], x2[index[1:]])
            y22 = np.minimum(y2[i], y2[index[1:]])
            w = np.maximum(0, x22 - x11 + 1)
            h = np.maximum(0, y22 - y11 + 1)
            overlaps = w * h
            actual_iou = overlaps / (areas[i] + areas[index[1:]] - overlaps)
            # Keep only boxes whose overlap is at or below the threshold.
            keep = np.where(actual_iou <= input_iou)[0]
            index = index[keep + 1]
        return res
