import os
import cv2
import numpy as np
import random
import time
from queue import Queue
from threading import Thread
from rknnlite.api import RKNNLite
from PIL import Image, ImageDraw, ImageFont

from .utils.coco_utils import COCO_test_helper


# Fixed parameters.
model_type = 'yolov7'
current_dir = os.path.dirname(__file__)

# Tunable parameters.
OBJ_THRESH = 0.6   # object confidence threshold (adjustable via set_obj_thresh)
NMS_THRESH = 0.45  # IoU threshold for non-maximum suppression

IMG_SIZE = (640, 640)  # (width, height), such as (1280, 736)

# Everything below is derived state; no need to edit.
# Class names, one per line.
with open(os.path.join(current_dir, 'classes.txt'), 'r') as f:
    CLASSES = [_v.strip() for _v in f.readlines() if _v.strip()]

# Anchor sizes, one number per line, grouped into 3 levels of (w, h) pairs.
with open(os.path.join(current_dir, 'anchors.txt'), 'r') as f:
    ANCHORS = np.array([float(_v) for _v in f.readlines()]).reshape(3,-1,2).tolist()

# Label font (SimHei supports Chinese glyphs, which cv2.putText cannot draw).
font = ImageFont.truetype(os.path.join(current_dir, 'simhei.ttf'), 22)

# One random color per class for box/label drawing.
colors = tuple(tuple(random.randint(0, 255) for _ in range(3)) for _ in range(len(CLASSES)))

co_helper = COCO_test_helper(enable_letter_box=True)

# Optional region-of-interest state, populated by set_mask().
cutout_mask = None
highlight_pts = None
highlight_mask = None

def sigmoid(x):
    """Elementwise logistic sigmoid: maps any real input into (0, 1)."""
    denom = 1 + np.exp(-x)
    return 1 / denom

def filter_boxes(boxes, box_confidences, box_class_probs, obj_thresh=None):
    """Filter candidate boxes by confidence threshold.

    Args:
        boxes: (N, 4) candidate boxes.
        box_confidences: objectness confidences, reshaped to (N,).
        box_class_probs: (N, num_classes) per-class probabilities.
        obj_thresh: optional score threshold override; defaults to the
            module-level OBJ_THRESH.

    Returns:
        Tuple (boxes, classes, scores) of the surviving candidates.
    """
    if obj_thresh is None:
        obj_thresh = OBJ_THRESH
    box_confidences = box_confidences.reshape(-1)
    # Only the class count is needed; the candidate count was unused.
    class_num = box_class_probs.shape[-1]

    class_max_score = np.max(box_class_probs, axis=-1)
    classes = np.argmax(box_class_probs, axis=-1)

    # Single-class yolov7 heads carry the whole score in the objectness
    # channel; otherwise score = best class probability * objectness.
    if class_num == 1 and model_type == 'yolov7':
        _class_pos = np.where(box_confidences >= obj_thresh)
        scores = box_confidences[_class_pos]
    else:
        combined = class_max_score * box_confidences
        _class_pos = np.where(combined >= obj_thresh)
        scores = combined[_class_pos]

    return boxes[_class_pos], classes[_class_pos], scores

def nms_boxes(boxes, scores, nms_thresh=None):
    """Greedy non-maximum suppression on [x1, y1, x2, y2] boxes.

    Args:
        boxes: (N, 4) boxes as x1, y1, x2, y2.
        scores: (N,) confidence scores.
        nms_thresh: optional IoU threshold override; defaults to the
            module-level NMS_THRESH.

    Returns:
        keep: ndarray, indices of the kept boxes, best score first.
    """
    if nms_thresh is None:
        nms_thresh = NMS_THRESH
    x = boxes[:, 0]
    y = boxes[:, 1]
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]

    areas = w * h
    order = scores.argsort()[::-1]  # candidate indices, best score first

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)

        # Intersection of the current best box with every remaining box.
        xx1 = np.maximum(x[i], x[order[1:]])
        yy1 = np.maximum(y[i], y[order[1:]])
        xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
        yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])

        # Tiny epsilon keeps touching boxes from yielding exactly zero.
        w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
        h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
        inter = w1 * h1

        # IoU against the kept box; keep only candidates below threshold.
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        inds = np.where(ovr <= nms_thresh)[0]
        order = order[inds + 1]
    keep = np.array(keep)
    return keep

def box_process(position, anchors):
    """Decode one head level's raw box channels into [x1, y1, x2, y2].

    Args:
        position: (A, 4, grid_h, grid_w) raw box tensor for one stride
            level (A = anchors per level; 1 for anchor-free heads).
        anchors: list of (w, h) anchor pairs for this level; only used by
            the yolov5/yolov7 branch.

    Returns:
        Array with the 4 channels decoded to absolute pixel coordinates
        (x1, y1, x2, y2) of the model input.
    """
    grid_h, grid_w = position.shape[2:4]
    # Per-cell index grids: col carries x indices, row carries y indices.
    col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
    col = col.reshape(1, 1, grid_h, grid_w)
    row = row.reshape(1, 1, grid_h, grid_w)
    grid = np.concatenate((col, row), axis=1)
    # Pixels per grid cell. NOTE(review): channel order here is
    # (IMG_SIZE[1]//grid_h, IMG_SIZE[0]//grid_w), i.e. (y, x), while the
    # grid/box channels are ordered (x, y); harmless for square IMG_SIZE
    # but worth confirming for non-square inputs.
    stride = np.array([IMG_SIZE[1]//grid_h, IMG_SIZE[0]//grid_w]).reshape(1,2,1,1)

    if model_type in ['yolov5', 'yolov7', 'yolox']:
        # output format: xywh -> xyxy
        if model_type == 'yolox':
            # yolox (anchor-free): xy are raw offsets, wh are log-scale.
            box_xy = position[:,:2,:,:]
            box_wh = np.exp(position[:,2:4,:,:]) * stride
        elif model_type in ['yolov5', 'yolov7']:
            col = col.repeat(len(anchors), axis=0)
            row = row.repeat(len(anchors), axis=0)
            anchors = np.array(anchors)
            anchors = anchors.reshape(*anchors.shape, 1, 1)

            # Standard v5/v7 decode: xy in (-0.5, 1.5) around the cell,
            # wh as (2 * sigmoid-activated value)^2 times the anchor size.
            box_xy = position[:,:2,:,:]*2 - 0.5
            box_wh = pow(position[:,2:4,:,:]*2, 2) * anchors

        box_xy += grid
        box_xy *= stride
        box = np.concatenate((box_xy, box_wh), axis=1)

        # Convert [c_x, c_y, w, h] to [x1, y1, x2, y2]
        xyxy = np.copy(box)
        xyxy[:, 0, :, :] = box[:, 0, :, :] - box[:, 2, :, :]/ 2  # top left x
        xyxy[:, 1, :, :] = box[:, 1, :, :] - box[:, 3, :, :]/ 2  # top left y
        xyxy[:, 2, :, :] = box[:, 0, :, :] + box[:, 2, :, :]/ 2  # bottom right x
        xyxy[:, 3, :, :] = box[:, 1, :, :] + box[:, 3, :, :]/ 2  # bottom right y

    elif model_type == 'yolov6':
        # yolov6: channels are distances from the cell center to the sides.
        box_xy  = grid +0.5 -position[:,0:2,:,:]
        box_xy2 = grid +0.5 +position[:,2:4,:,:]
        xyxy = np.concatenate((box_xy*stride, box_xy2*stride), axis=1)

    # NOTE(review): xyxy is unbound for unrecognized model_type values;
    # safe in this file because model_type is fixed to 'yolov7' above.
    return xyxy

def post_process(input_data, anchors):
    """Decode raw model outputs into final detections.

    Args:
        input_data: list of raw output tensors from the model.
        anchors: nested per-level anchor list (ignored by anchor-free
            model types).

    Returns:
        (boxes, classes, scores) arrays after confidence filtering and
        per-class NMS, or (None, None, None) when nothing survives.
    """
    boxes, scores, classes_conf = [], [], []
    if model_type in ['yolov5', 'yolov7', 'yolox']:
        # Reshape 1*255*h*w -> 3*85*h*w (anchors x (4 box + 1 obj + classes)).
        input_data = [_in.reshape([len(anchors[0]),-1]+list(_in.shape[-2:])) for _in in input_data]
        for i in range(len(input_data)):
            boxes.append(box_process(input_data[i][:,:4,:,:], anchors[i]))
            scores.append(input_data[i][:,4:5,:,:])
            classes_conf.append(input_data[i][:,5:,:,:])
    elif model_type in ['yolov6', 'yolov8', 'ppyoloe_plus']:
        # Anchor-free heads: box and class tensors alternate in the output
        # list; objectness is implicitly 1. (Fixed 'defualt_branch' typo.)
        default_branch = 3
        for i in range(default_branch):
            boxes.append(box_process(input_data[2*i], None))
            classes_conf.append(input_data[2*i+1])
            scores.append(np.ones_like(input_data[2*i+1][:,:1,:,:], dtype=np.float32))

    def sp_flatten(_in):
        # (A, C, H, W) -> (A*H*W, C)
        ch = _in.shape[1]
        return _in.transpose(0, 2, 3, 1).reshape(-1, ch)

    boxes = np.concatenate([sp_flatten(_v) for _v in boxes])
    classes_conf = np.concatenate([sp_flatten(_v) for _v in classes_conf])
    scores = np.concatenate([sp_flatten(_v) for _v in scores])

    # Drop candidates below the confidence threshold.
    boxes, classes, scores = filter_boxes(boxes, scores, classes_conf)

    # Per-class NMS. np.unique gives a deterministic (sorted) class order,
    # where the original iterated an unordered set; the loop variable is
    # also no longer shadowed by the per-class selection below.
    nboxes, nclasses, nscores = [], [], []
    for cls in np.unique(classes):
        inds = np.where(classes == cls)
        b = boxes[inds]
        c = classes[inds]
        s = scores[inds]
        keep = nms_boxes(b, s)

        if len(keep) != 0:
            nboxes.append(b[keep])
            nclasses.append(c[keep])
            nscores.append(s[keep])

    if not nclasses and not nscores:
        return None, None, None

    boxes = np.concatenate(nboxes)
    classes = np.concatenate(nclasses)
    scores = np.concatenate(nscores)

    return boxes, classes, scores

# Pillow rendering is roughly an order of magnitude slower than cv2, but it
# is the only way to display Chinese label text with this font.
# Does not draw on the input image; returns a freshly drawn copy.
def draw_results(image, results):
    """Draw detection boxes and class labels; returns a new image array."""
    canvas = Image.fromarray(image)
    painter = ImageDraw.Draw(canvas)
    for box, cl, score in results:
        label = CLASSES[cl]
        painter.rectangle(box, outline=colors[cl], width=2)
        label_bbox = painter.textbbox((0, 0), label, font)
        # Place the label above the box, or just below it when it would
        # run off the top of the image.
        text_y = box[1] - label_bbox[3]
        if text_y < 0:
            text_y = box[3] + 1
        painter.text((box[0], text_y), label, colors[cl], font)
    return np.asarray(canvas)

# Alternative cv2-based renderer (draws directly on the input image); kept commented out for reference.
# def draw_results(image, results):
#     for box, cl, score in results:
#         top, left, right, bottom = box

#         cv2.rectangle(image, (top, left), (right, bottom), colors[cl], 2)
#         cv2.putText(image, 
#                     CLASSES[cl],
#                     (top, left - 6),
#                     cv2.FONT_HERSHEY_COMPLEX,
#                     0.6, colors[cl], 2)

def set_mask(polies, image_size_wh):
    """Build the cut-out and highlight masks from normalized polygons.

    Args:
        polies: iterable of polygons in normalized (0..1) coordinates,
            each flattenable to an (N, 2) array of (x, y) points.
        image_size_wh: (width, height) of the display image used for the
            highlight overlay.

    Side effects:
        Sets the module globals `cutout_mask` (single-channel, model input
        size), `highlight_mask` (3-channel, display size) and
        `highlight_pts`. (Removed a leftover debug print of the polygons.)
    """
    global cutout_mask, highlight_mask, highlight_pts
    cutout_pts, highlight_pts = [], []
    for poly in polies:
        pts = np.asarray(poly, dtype=np.float64).reshape(-1, 2)
        # Scale normalized coords to model-input pixels and display pixels;
        # column 0 is x (width), column 1 is y (height).
        cutout_pts.append((pts * IMG_SIZE).astype(np.int32))
        highlight_pts.append((pts * image_size_wh).astype(np.int32))

    # Single-channel mask that blanks everything outside the polygons
    # before inference (see make_mask).
    cutout_mask = cv2.fillPoly(
        np.zeros(IMG_SIZE[::-1], dtype=np.uint8),
        cutout_pts,
        (255, 255, 255)
    )
    # Orange overlay blended onto the display image by draw_mask().
    highlight_mask = cv2.fillPoly(
        np.zeros((*image_size_wh[::-1], 3), dtype=np.uint8),
        highlight_pts,
        (0, 165, 255)
    )

# Returns a masked copy; the input image itself is left untouched.
def make_mask(image):
    """Blank everything outside the configured cut-out polygons, if any."""
    if cutout_mask is None:
        return image
    return cv2.add(image, 0, mask=cutout_mask)

# Draws directly on the input image.
def draw_mask(image):
    """Overlay the highlight polygons (tint plus outline) onto image in place."""
    if highlight_mask is None:
        return
    cv2.addWeighted(image, 1, highlight_mask, 0.2, 0, image)
    cv2.polylines(image, highlight_pts, True, (255, 255, 0), 1)

def detect(model, image):
    """Run one inference pass and map detections back to input coordinates.

    Args:
        model: an initialized RKNNLite runtime.
        image: BGR image (numpy array) of arbitrary size.

    Returns:
        List of (box, class_id, score) tuples, where box is an int
        (x1, y1, x2, y2) tuple in the coordinates of the original image.
        Empty list when nothing passes the thresholds.
    """
    # Letter-box to the model input size, keeping the scale ratio and the
    # (dw, dh) padding so boxes can be mapped back afterwards.
    img, ratio, (dw, dh) = co_helper.letter_box(im=image, new_shape=(IMG_SIZE[1], IMG_SIZE[0]), pad_color=(0,0,0), info_need=True)

    # Apply the optional cut-out mask, convert BGR -> RGB for the network.
    outputs = model.inference([cv2.cvtColor(make_mask(img), cv2.COLOR_BGR2RGB)])
    boxes, classes, scores = post_process(outputs, ANCHORS)

    if boxes is not None:
        # Undo the letter-box padding and scaling.
        # NOTE(review): assumes `ratio` is a scalar; confirm letter_box
        # does not return a (ratio_w, ratio_h) pair.
        for i in range(len(boxes)):
            bbox = boxes[i]
            bbox[0] -= dw
            bbox[1] -= dh
            bbox[2] -= dw
            bbox[3] -= dh
            boxes[i] = [value/ratio for value in bbox]
        
    return [(
        tuple(map(int, item[0])),  # box as int (x1, y1, x2, y2)
        item[1],  # class index into CLASSES
        item[2]  # confidence score
     ) for item in zip(boxes, classes, scores)] if boxes is not None else []

def set_obj_thresh(thresh):
    """Update the module-wide object confidence threshold at runtime."""
    global OBJ_THRESH
    OBJ_THRESH = thresh

# Multithreaded inference pool.
class RKNNPool:
    """Pool of worker threads, each owning one RKNNLite model instance.

    Push images onto `detect_queue`; workers emit
    (timestamp, image, detections) tuples on `result_queue`, where the
    perf_counter timestamp serves as the image id. Results may arrive
    out of submission order.
    """

    # NPU cores assigned round-robin to worker threads.
    NPU_INDEX = (
        RKNNLite.NPU_CORE_0,
        RKNNLite.NPU_CORE_1,
        RKNNLite.NPU_CORE_2,
    )

    def __init__(self, num):
        self._result_queue = Queue()
        self._detect_queue = Queue()
        for i in range(num):
            # Derive the core index from the tuple length instead of a
            # hard-coded 3, so NPU_INDEX stays the single source of truth.
            Thread(target=self._daemon, args=(i % len(self.NPU_INDEX),), daemon=True).start()

    def _daemon(self, npu_index):
        # Each worker loads its own model copy pinned to one NPU core.
        model = RKNNLite()
        model.load_rknn(os.path.join(current_dir, 'model.rknn'))
        model.init_runtime(core_mask=self.NPU_INDEX[npu_index])
        while True:
            # Use the private attribute directly, consistent with
            # self._result_queue below (was the detect_queue property).
            image = self._detect_queue.get()
            # First element is a perf_counter timestamp used as image id.
            self._result_queue.put((time.perf_counter(), image, detect(model, image)))

    @property
    def result_queue(self):
        return self._result_queue

    @property
    def detect_queue(self):
        return self._detect_queue