import os
import numpy as np
import cv2 
from copy import copy

import de
from .base_tpu import TPU

# Board / ROI bounds in model-input pixels (left, top, right, bottom).
# NOTE(review): not referenced anywhere in this file — presumably consumed by
# sibling modules that import these constants; confirm before removing.
board_min_l = 0
board_min_t = 0
board_min_r = 640
board_min_b = 640


def sigmoid(x):
    """Element-wise logistic function: 1 / (1 + e^(-x))."""
    exp_neg = np.exp(-x)
    return 1. / (1. + exp_neg)


class PredictYolov8(TPU):
    """YOLOv8 inference wrapper for the TPU engine.

    Pipeline: letterbox the frame -> pack it into the engine's planar-RGB
    input format -> run inference -> decode the three detection branches
    (DFL box regression + class scores) -> confidence filter -> per-class NMS.
    """

    def __init__(self):
        super().__init__()
        # Report which model wrapper is running and where it lives.
        file_name = os.path.basename(__file__)
        print(f"{file_name}.py path : {self.pwd}")

        self.co_helper = COCO_test_helper(enable_letter_box=True)
        self.IMG_SIZE = [640, 640]

    def predict(self, img, image_size=(640, 640), OBJ_THRESH=0.25, NMS_THRESH=0.45):
        """Run detection on a single BGR frame.

        Args:
            img: HxWx3 BGR image (uint8), as produced by OpenCV.
            image_size: model input size, indexed as [0]=width, [1]=height.
                (Fixed: was a mutable list default; an immutable tuple avoids
                the shared-default pitfall and is indexed identically.)
            OBJ_THRESH: minimum (best class score * objectness) to keep a box.
            NMS_THRESH: IoU threshold for per-class NMS.

        Returns:
            List of [x, y, w, h, score, class_id], where (x, y) is the box
            center in letterboxed-input coordinates; empty list when nothing
            is detected.
        """
        self.IMG_SIZE = image_size
        # Letterbox to the model input size, padding with black.
        img = self.co_helper.letter_box(im=img.copy(),
                                        new_shape=(self.IMG_SIZE[1], self.IMG_SIZE[0]),
                                        pad_color=(0, 0, 0))
        data = self.cap_processing(img)
        output = self.engine.predict(data)  # raw multi-branch tensors
        # boxes: (n, 4) xyxy, classes: (n,), scores: (n,) — or None each.
        boxes, classes, scores = self.post_process(output, OBJ_THRESH, NMS_THRESH)

        result = []
        if boxes is None:
            return result
        for i in range(len(boxes)):
            x1, y1, x2, y2 = boxes[i]
            # Box center.  NOTE(review): the fixed -80 pixel shift on y looks
            # like a calibration offset for a specific camera mounting —
            # confirm against the caller before reusing elsewhere.
            x = int((x1 + x2) / 2)
            y = int((y1 + y2) / 2) - 80
            w = x2 - x1
            h = y2 - y1
            score = scores[i]
            class_id = classes[i]
            result.append([x, y, w, h, score, class_id])

        return result

    def cap_processing(self, image, C=3):
        """Pack a BGR HWC frame into the engine's planar-RGB input list.

        Returns:
            Single-sample list [(pixel_format, shape, tensor)] with the
            tensor shaped (1, C, H, W).
        """
        # Renamed from `format`, which shadowed the builtin.
        pix_fmt = de.PixelFormat.DE_PIX_FMT_RGB888_PLANE
        # BGR -> RGB and HWC -> CHW in one step, then add the batch axis.
        image = image[:, :, ::-1].transpose(2, 0, 1)
        image = np.expand_dims(image, axis=0)

        shape = tuple(image.shape)  # (1, C, H, W)
        # One entry per sample; multiple entries may be supplied for batching.
        return [(pix_fmt, shape, image)]

    def post_process(self, input_data, OBJ_THRESH, NMS_THRESH):
        """Decode raw branch outputs into filtered, NMS'd detections.

        Args:
            input_data: engine outputs, grouped per detection branch
                (box-regression tensor followed by class-score tensor;
                any extra score_sum output is ignored).
            OBJ_THRESH: confidence threshold.
            NMS_THRESH: IoU threshold.

        Returns:
            (boxes, classes, scores) arrays, or (None, None, None) when
            nothing survives filtering.
        """
        boxes, scores, classes_conf = [], [], []
        default_branch = 3  # YOLOv8 heads at strides 8/16/32 (typo fixed)
        pair_per_branch = len(input_data) // default_branch
        for i in range(default_branch):
            boxes.append(self.box_process(input_data[pair_per_branch * i]))
            classes_conf.append(sigmoid(input_data[pair_per_branch * i + 1]))
            # YOLOv8 has no separate objectness output: use 1.0 per anchor.
            scores.append(np.ones_like(input_data[pair_per_branch * i + 1][:, :1, :, :], dtype=np.float32))

        def sp_flatten(_in):
            # (N, C, H, W) -> (N*H*W, C)
            ch = _in.shape[1]
            return _in.transpose(0, 2, 3, 1).reshape(-1, ch)

        boxes = np.concatenate([sp_flatten(v) for v in boxes])
        classes_conf = np.concatenate([sp_flatten(v) for v in classes_conf])
        scores = np.concatenate([sp_flatten(v) for v in scores])

        # Drop candidates below the confidence threshold.
        boxes, classes, scores = self.filter_boxes(boxes, scores, classes_conf, OBJ_THRESH)

        # Per-class NMS.  (Fixed: the body previously shadowed the loop
        # variable `c` with the selected class array.)
        nboxes, nclasses, nscores = [], [], []
        for c in set(classes):
            inds = np.where(classes == c)
            b = boxes[inds]
            cls_ids = classes[inds]
            s = scores[inds]
            keep = self.nms_boxes(b, s, NMS_THRESH)

            if len(keep) != 0:
                nboxes.append(b[keep])
                nclasses.append(cls_ids[keep])
                nscores.append(s[keep])

        if not nclasses and not nscores:
            return None, None, None

        boxes = np.concatenate(nboxes)
        classes = np.concatenate(nclasses)
        scores = np.concatenate(nscores)

        return boxes, classes, scores

    def filter_boxes(self, boxes, box_confidences, box_class_probs, OBJ_THRESH):
        """Keep boxes whose (best class score * objectness) >= OBJ_THRESH.

        Returns:
            (boxes, classes, scores) for the surviving candidates.
        """
        box_confidences = box_confidences.reshape(-1)

        class_max_score = np.max(box_class_probs, axis=-1)
        classes = np.argmax(box_class_probs, axis=-1)

        combined = class_max_score * box_confidences
        _class_pos = np.where(combined >= OBJ_THRESH)

        return boxes[_class_pos], classes[_class_pos], combined[_class_pos]

    def dfl(self, position):
        """Distribution Focal Loss decoding.

        Collapses each side's `mc`-bin distance distribution into its
        expected value: (N, 4*mc, H, W) -> (N, 4, H, W).
        """
        import torch  # local import: torch only needed for this decode
        x = torch.tensor(position)
        n, c, h, w = x.shape
        p_num = 4
        mc = c // p_num
        y = x.reshape(n, p_num, mc, h, w)
        y = y.softmax(2)
        # Expected bin index = sum(p_i * i) over the distribution axis.
        acc_matrix = torch.tensor(range(mc)).float().reshape(1, 1, mc, 1, 1)
        y = (y * acc_matrix).sum(2)
        return y.numpy()

    def box_process(self, position):
        """Convert one branch's DFL regression map to absolute xyxy boxes.

        Args:
            position: (N, 4*mc, grid_h, grid_w) distance distributions.

        Returns:
            (N, 4, grid_h, grid_w) boxes in model-input pixels.
        """
        grid_h, grid_w = position.shape[2:4]
        col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
        col = col.reshape(1, 1, grid_h, grid_w)
        row = row.reshape(1, 1, grid_h, grid_w)
        grid = np.concatenate((col, row), axis=1)
        # Stride of this branch, derived from the configured input size.
        stride = np.array([self.IMG_SIZE[1] // grid_h,
                           self.IMG_SIZE[0] // grid_w]).reshape(1, 2, 1, 1)

        position = self.dfl(position)
        # Distances are measured from each cell center (grid + 0.5).
        box_xy = grid + 0.5 - position[:, 0:2, :, :]
        box_xy2 = grid + 0.5 + position[:, 2:4, :, :]
        xyxy = np.concatenate((box_xy * stride, box_xy2 * stride), axis=1)

        return xyxy

    def nms_boxes(self, boxes, scores, NMS_THRESH):
        """Suppress non-maximal boxes (greedy IoU-based NMS).

        Returns:
            keep: ndarray of indices of the boxes to keep.
        """
        x = boxes[:, 0]
        y = boxes[:, 1]
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]

        areas = w * h
        order = scores.argsort()[::-1]  # highest score first

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)

            # Intersection of the current best box with all remaining boxes.
            xx1 = np.maximum(x[i], x[order[1:]])
            yy1 = np.maximum(y[i], y[order[1:]])
            xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
            yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])

            # Tiny epsilon keeps degenerate (zero-width) overlaps non-negative.
            w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
            h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
            inter = w1 * h1

            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= NMS_THRESH)[0]
            order = order[inds + 1]
        keep = np.array(keep)
        return keep

class Letter_Box_Info():
    """Record of one letterbox/resize operation, kept so detections can be
    mapped back from model-input coordinates to the original frame."""

    def __init__(self, shape, new_shape, w_ratio, h_ratio, dw, dh, pad_color) -> None:
        self.origin_shape = shape    # (h, w) of the source image
        self.new_shape = new_shape   # target shape after letterboxing
        self.w_ratio = w_ratio       # horizontal scale that was applied
        self.h_ratio = h_ratio       # vertical scale that was applied
        self.dw = dw                 # horizontal padding (per side)
        self.dh = dh                 # vertical padding (per side)
        self.pad_color = pad_color   # border fill color

class COCO_test_helper():
    """Pre/post-processing helper: letterbox or plain-resize frames for the
    model, and map predicted boxes back to original-image coordinates."""

    def __init__(self, enable_letter_box=False) -> None:
        self.record_list = []
        # NOTE(review): attribute keeps its historical typo ("ltter") so any
        # external readers of this flag continue to work.
        self.enable_ltter_box = enable_letter_box
        # One Letter_Box_Info is appended per processed frame; only the most
        # recent entry is consulted by get_real_box.
        if self.enable_ltter_box:
            self.letter_box_info_list = []
        else:
            self.letter_box_info_list = None

    def letter_box(self, im, new_shape=(640, 640), pad_color=(0, 0, 0), info_need=False):
        """Resize `im` to fit `new_shape` (h, w) preserving aspect ratio and
        pad the remainder with `pad_color`.

        Args:
            im: HxWxC image.
            new_shape: target (h, w), or a single int for a square target.
            pad_color: BGR fill value for the border.
            info_need: when truthy, also return (ratio, (dw, dh)).

        Returns:
            The letterboxed image, optionally with the scale and padding.
        """
        shape = im.shape[:2]  # current (height, width)
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale so both dimensions fit inside the target.
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        ratio = r
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]

        dw /= 2  # split the padding evenly between the two sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize only when necessary
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        # The +/-0.1 rounding places any odd leftover pixel on bottom/right.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_color)

        if self.enable_ltter_box:
            self.letter_box_info_list.append(Letter_Box_Info(shape, new_shape, ratio, ratio, dw, dh, pad_color))
        if info_need:
            return im, ratio, (dw, dh)
        else:
            return im

    def direct_resize(self, im, new_shape, info_need=False):
        """Plain (aspect-distorting) resize of `im` to new_shape (h, w)."""
        shape = im.shape[:2]
        h_ratio = new_shape[0] / shape[0]
        w_ratio = new_shape[1] / shape[1]
        if self.enable_ltter_box:
            self.letter_box_info_list.append(Letter_Box_Info(shape, new_shape, w_ratio, h_ratio, 0, 0, (0, 0, 0)))
        im = cv2.resize(im, (new_shape[1], new_shape[0]))
        return im

    def get_real_box(self, box, in_format='xyxy'):
        """Map letterboxed-coordinate boxes back to original-image coordinates.

        Args:
            box: (n, 4) float array of boxes in model-input coordinates.
            in_format: only 'xyxy' is handled; any other value returns an
                unmodified copy (original behavior preserved).

        Returns:
            A new array; the input array is not modified.
        """
        bbox = copy(box)
        if self.enable_ltter_box and in_format == 'xyxy':
            info = self.letter_box_info_list[-1]  # most recent frame
            img_h, img_w = info.origin_shape[0], info.origin_shape[1]
            # Undo padding, then scaling, then clip to the image bounds —
            # x columns (0, 2) use dw/w_ratio, y columns (1, 3) use dh/h_ratio.
            for col, pad, ratio, limit in ((0, info.dw, info.w_ratio, img_w),
                                           (1, info.dh, info.h_ratio, img_h),
                                           (2, info.dw, info.w_ratio, img_w),
                                           (3, info.dh, info.h_ratio, img_h)):
                bbox[:, col] -= pad
                bbox[:, col] /= ratio
                bbox[:, col] = np.clip(bbox[:, col], 0, limit)
        return bbox
