import os
import kp
import torch
import numpy as np
import cv2
from copy import copy

from .base_tpu import TPU


class PredictYolov8(TPU):
    """YOLOv8 inference wrapper for Kneron TPU devices.

    Handles preprocessing (color conversion + letterbox), on-device
    inference through the ``kp`` SDK, and YOLOv8 post-processing
    (DFL box decode, confidence filtering, per-class NMS).
    """

    def __init__(self):
        super().__init__()
        # Report which module file is running and the working path from the base class.
        file_name = os.path.basename(__file__)
        print(f"{file_name}.py path : {self.pwd}")
        self.co_helper = COCO_test_helper(enable_letter_box=True)

    def predict(self, img, imgsz, conf=None, iou=None, model_nth=0):
        """Run one inference pass and return detections.

        Args:
            img: input image (BGR, numpy array).
            imgsz: model input size as (width, height).
            conf: confidence threshold; if both conf and iou are falsy,
                inference is skipped entirely.
            iou: NMS IoU threshold.
            model_nth: model index forwarded to the inference helper.

        Returns:
            list of [x_center, y_center, w, h, score, class_id], empty when
            nothing is detected or when no thresholds were supplied.
        """
        if not (conf or iou):
            # Bug fix: the old code fell off the end and returned an implicit
            # None here, which crashed callers that iterate the result.
            return []

        self.IMG_SIZE = imgsz
        img = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2BGR565)
        # Letterbox to model input size; note new_shape is (height, width).
        img = self.co_helper.letter_box(im=img.copy(),
                                        new_shape=(self.IMG_SIZE[1], self.IMG_SIZE[0]),
                                        pad_color=(0, 0, 0))
        inf_node_output_list = self.after_poscessor_yolov8(model_nth, img)
        boxes, classes, scores = self.post_process(inf_node_output_list, conf, iou)

        # Convert each (x1, y1, x2, y2) box to [x, y, w, h, score, class] form.
        result = []
        if boxes is None:
            return result
        for i in range(len(boxes)):
            x1, y1, x2, y2 = boxes[i]
            x = int((x1 + x2) / 2)  # box center x
            # NOTE(review): fixed -80 px vertical offset on the center —
            # presumably a camera/frame calibration fudge; confirm with caller.
            y = int((y1 + y2) / 2) - 80
            w = x2 - x1  # box width
            h = y2 - y1  # box height
            result.append([x, y, w, h, scores[i], classes[i]])
        return result

    def release(self):
        """Disconnect the TPU device group; exits the process on failure."""
        try:
            kp.core.disconnect_devices(self.device_group)
            print('TPU释放成功')
        except kp.ApiKPException as exception:
            print('Error: release device fail, error = \'{}\''.format(str(exception)))
            # TODO(review): exit(0) reports success to the OS on a failure
            # path; consider a non-zero code. Kept as-is for compatibility.
            exit(0)

    def reboot(self):
        """Placeholder: only logs; no actual device reboot is performed."""
        print('rebooting device...')

    def after_poscessor_yolov8(self, model_nth, img):
        """Send one preprocessed RGB565 image to the device and collect every
        float output node in CHW channel ordering.

        Returns:
            list of float node outputs, one entry per model output node.
        """
        # NOTE(review): model_nth is currently unused — models[0] is always
        # selected. Confirm whether multi-model NEFs need models[model_nth].
        generic_inference_input_descriptor = kp.GenericImageInferenceDescriptor(
            model_id=self.model_nef_descriptor.models[0].id,
            inference_number=0,
            input_node_image_list=[
                kp.GenericInputNodeImage(
                    image=img,
                    image_format=kp.ImageFormat.KP_IMAGE_FORMAT_RGB565,
                    resize_mode=kp.ResizeMode.KP_RESIZE_ENABLE,
                    padding_mode=kp.PaddingMode.KP_PADDING_CORNER,
                    normalize_mode=kp.NormalizeMode.KP_NORMALIZE_YOLO
                )
            ]
        )

        try:
            kp.inference.generic_image_inference_send(
                device_group=self.device_group,
                generic_inference_input_descriptor=generic_inference_input_descriptor)
            generic_raw_result = kp.inference.generic_image_inference_receive(
                device_group=self.device_group)
        except kp.ApiKPException as exception:
            print(' - Error: inference failed, error = {}'.format(exception))
            # TODO(review): exit(0) on failure signals success; see release().
            exit(0)

        inf_node_output_list = []
        for node_idx in range(generic_raw_result.header.num_output_node):
            inf_node_output_list.append(
                kp.inference.generic_inference_retrieve_float_node(
                    node_idx=node_idx,
                    generic_raw_result=generic_raw_result,
                    channels_ordering=kp.ChannelOrdering.KP_CHANNEL_ORDERING_CHW))
        return inf_node_output_list

    def dfl(self, position):
        """Distribution Focal Loss decode.

        Collapses the per-side distance distribution into its expected value:
        (1, 4*mc, H, W) -> (1, 4, H, W).
        """
        # torch is imported at module level; the old local re-import was redundant.
        x = torch.tensor(position)
        n, c, h, w = x.shape
        p_num = 4          # four box sides (left, top, right, bottom)
        mc = c // p_num    # distribution bins per side (16 for YOLOv8)
        y = x.reshape(n, p_num, mc, h, w)   # e.g. (1, 4, 16, 80, 80)
        y = y.softmax(2)
        # Expectation over the bin index: sum_i i * p_i.
        acc_metrix = torch.tensor(range(mc)).float().reshape(1, 1, mc, 1, 1)
        y = (y * acc_metrix).sum(2)
        return y.numpy()

    def box_process(self, position):
        """Decode one head's distance map into absolute xyxy boxes.

        position: (1, 4*mc, grid_h, grid_w) raw output; returns (1, 4, grid_h, grid_w).
        """
        grid_h, grid_w = position.shape[2:4]
        col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
        col = col.reshape(1, 1, grid_h, grid_w)
        row = row.reshape(1, 1, grid_h, grid_w)
        grid = np.concatenate((col, row), axis=1)   # (1, 2, grid_h, grid_w)
        # Stride from model input size to this head's grid resolution.
        stride = np.array([self.IMG_SIZE[1] // grid_h,
                           self.IMG_SIZE[0] // grid_w]).reshape(1, 2, 1, 1)

        position = self.dfl(position)               # -> (1, 4, grid_h, grid_w)
        # Cell center (grid + 0.5) minus/plus the decoded side distances.
        box_xy = grid + 0.5 - position[:, 0:2, :, :]
        box_xy2 = grid + 0.5 + position[:, 2:4, :, :]
        xyxy = np.concatenate((box_xy * stride, box_xy2 * stride), axis=1)

        return xyxy

    def filter_boxes(self, boxes, box_confidences, box_class_probs, OBJ_THRESH):
        """Keep boxes whose best class score * objectness >= OBJ_THRESH.

        Returns:
            (boxes, classes, scores) filtered to the surviving candidates.
        """
        box_confidences = box_confidences.reshape(-1)  # objectness (all ones here)

        class_max_score = np.max(box_class_probs, axis=-1)   # best class score per box
        classes = np.argmax(box_class_probs, axis=-1)        # id of that class

        # Select candidates above the confidence threshold.
        _class_pos = np.where(class_max_score * box_confidences >= OBJ_THRESH)
        scores = (class_max_score * box_confidences)[_class_pos]

        boxes = boxes[_class_pos]
        classes = classes[_class_pos]

        return boxes, classes, scores

    def nms_boxes(self, boxes, scores, NMS_THRESH):
        """Suppress non-maximal boxes (greedy IoU NMS, one class at a time).

        Returns:
            keep: ndarray, indices of the boxes to keep.
        """
        x = boxes[:, 0]
        y = boxes[:, 1]
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]

        areas = w * h
        order = scores.argsort()[::-1]  # indices sorted by descending score

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Intersection of the top box with every remaining box.
            xx1 = np.maximum(x[i], x[order[1:]])
            yy1 = np.maximum(y[i], y[order[1:]])
            xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
            yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])

            # Small epsilon keeps degenerate overlaps from vanishing to zero.
            w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
            h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
            inter = w1 * h1

            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= NMS_THRESH)[0]
            order = order[inds + 1]  # +1: inds are relative to order[1:]
        keep = np.array(keep)
        return keep

    def post_process(self, input_data, OBJ_THRESH, NMS_THRESH):
        """Decode all output heads into final detections.

        Returns:
            (boxes, classes, scores) as concatenated arrays, or
            (None, None, None) when nothing survives filtering/NMS.
        """
        boxes, scores, classes_conf = [], [], []
        default_branch = 3  # YOLOv8 has three detection heads
        pair_per_branch = len(input_data) // default_branch
        # Any extra score_sum output node is ignored by this pairing.
        for i in range(default_branch):
            box_data = input_data[pair_per_branch * i].ndarray
            cls_data = input_data[pair_per_branch * i + 1].ndarray
            boxes.append(self.box_process(box_data))
            classes_conf.append(sigmoid(cls_data))          # (1, nc, H, W)
            # Objectness is fixed to 1 for anchor-free YOLOv8.
            scores.append(np.ones_like(cls_data[:, :1, :, :], dtype=np.float32))

        def sp_flatten(_in):
            # (1, C, H, W) -> (H*W, C)
            ch = _in.shape[1]
            return _in.transpose(0, 2, 3, 1).reshape(-1, ch)

        boxes = np.concatenate([sp_flatten(v) for v in boxes])
        classes_conf = np.concatenate([sp_flatten(v) for v in classes_conf])
        scores = np.concatenate([sp_flatten(v) for v in scores])

        # Filter by confidence threshold.
        boxes, classes, scores = self.filter_boxes(boxes, scores, classes_conf, OBJ_THRESH)

        # Per-class NMS.
        nboxes, nclasses, nscores = [], [], []
        for c in set(classes):
            inds = np.where(classes == c)
            b = boxes[inds]        # boxes of this class
            cls_ids = classes[inds]
            s = scores[inds]
            keep = self.nms_boxes(b, s, NMS_THRESH)

            if len(keep) != 0:
                nboxes.append(b[keep])
                nclasses.append(cls_ids[keep])
                nscores.append(s[keep])

        if not nclasses and not nscores:
            return None, None, None

        boxes = np.concatenate(nboxes)
        classes = np.concatenate(nclasses)
        scores = np.concatenate(nscores)

        return boxes, classes, scores


def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^(-x))."""
    neg_exp = np.exp(-x)
    return 1. / (1. + neg_exp)


class Letter_Box_Info():
    """Record of a single letterbox/resize step, used later to map detection
    boxes back into original-image coordinates."""

    def __init__(self, shape, new_shape, w_ratio, h_ratio, dw, dh, pad_color) -> None:
        # Source (h, w) and the target shape it was mapped to.
        self.origin_shape, self.new_shape = shape, new_shape
        # Scale factors applied along width and height.
        self.w_ratio, self.h_ratio = w_ratio, h_ratio
        # Half-padding added to each side (width, height).
        self.dw, self.dh = dw, dh
        self.pad_color = pad_color


class COCO_test_helper():
    """Tracks image preprocessing (letterbox / direct resize) so that boxes
    predicted on the preprocessed image can be mapped back to original-image
    coordinates."""

    def __init__(self, enable_letter_box=False) -> None:
        self.record_list = []
        # NOTE: attribute name keeps the historical typo ("ltter") because
        # code outside this file may read it; do not rename without auditing.
        self.enable_ltter_box = enable_letter_box
        if self.enable_ltter_box is True:
            self.letter_box_info_list = []
        else:
            self.letter_box_info_list = None

    def get_letter_box_info(self, shape, new_shape=(640, 640), pad_color=(0, 0, 0)):
        """Compute and record letterbox parameters without touching any image.

        Args:
            shape: source image shape (h, w).
            new_shape: target shape (h, w).
            pad_color: border color recorded alongside the geometry.
        """
        # Scale ratio: fit the whole image inside new_shape, aspect preserved.
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        ratio = r  # same ratio on both axes
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h)
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

        dw /= 2  # padding is split evenly between the two sides
        dh /= 2

        # Bug fix: the old code appended unconditionally and raised
        # AttributeError when letterboxing was disabled (the list is None).
        if self.enable_ltter_box is True:
            self.letter_box_info_list.append(
                Letter_Box_Info(shape, new_shape, ratio, ratio, dw, dh, pad_color))

    def letter_box(self, im, new_shape=(640, 640), pad_color=(0, 0, 0), info_need=False):
        """Resize *im* preserving aspect ratio and pad to new_shape.

        Returns the padded image, plus (ratio, (dw, dh)) when info_need is True.
        """
        shape = im.shape[:2]  # current shape (height, width)
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio: fit the whole image inside new_shape.
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        ratio = r
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h)
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize only when needed
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        # round(x -/+ 0.1) splits an odd padding total across the two sides.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right,
                                cv2.BORDER_CONSTANT, value=pad_color)

        if self.enable_ltter_box is True:
            self.letter_box_info_list.append(
                Letter_Box_Info(shape, new_shape, ratio, ratio, dw, dh, pad_color))
        if info_need is True:
            return im, ratio, (dw, dh)
        else:
            return im

    def direct_resize(self, im, new_shape, info_need=False):
        """Plain resize to new_shape (h, w), recording per-axis ratios."""
        shape = im.shape[:2]
        h_ratio = new_shape[0] / shape[0]
        w_ratio = new_shape[1] / shape[1]
        if self.enable_ltter_box is True:
            self.letter_box_info_list.append(
                Letter_Box_Info(shape, new_shape, w_ratio, h_ratio, 0, 0, (0, 0, 0)))
        im = cv2.resize(im, (new_shape[1], new_shape[0]))
        return im

    def get_real_box(self, box, in_format='xyxy'):
        """Map boxes from letterboxed coordinates back to the original image.

        Args:
            box: (N, 4) float array in the preprocessed image's coordinates.
            in_format: only 'xyxy' is handled; other formats pass through.

        Returns:
            a copy of *box* with padding/scale undone and values clipped to
            the original image bounds; the input array is not modified.
        """
        bbox = copy(box)
        # Bug fix: leftover debug print() calls removed from this path.
        if self.enable_ltter_box == True and in_format == 'xyxy':
            info = self.letter_box_info_list[-1]  # most recent preprocessing step
            # For each coordinate column: undo padding, undo scaling, clip.
            for col, pad, ratio, limit in (
                (0, info.dw, info.w_ratio, info.origin_shape[1]),
                (1, info.dh, info.h_ratio, info.origin_shape[0]),
                (2, info.dw, info.w_ratio, info.origin_shape[1]),
                (3, info.dh, info.h_ratio, info.origin_shape[0]),
            ):
                bbox[:, col] -= pad
                bbox[:, col] /= ratio
                bbox[:, col] = np.clip(bbox[:, col], 0, limit)
        return bbox