"""
input: rknn model
output: see list in __init__()
	self.res_img
	self.org_img
	self.det_bboxes list [dict{id, score, top, left, right, bottom},...]

"""
import cv2
from std_msgs.msg import String
import os
import re
import numpy as np
import platform
from rknnlite.api import RKNNLite

class YoloRknn:
    """YOLOv5 object detector running on a Rockchip RK3588 NPU via RKNNLite.

    Typical use:
        det = YoloRknn('model.rknn')
        det.det(bgr_image)      # run inference, populates the attributes below
        det.det_bboxes          # list of dicts {id, score, top, left, right, bottom}
        det.res_img             # annotated 1280x720 result image
        det.org_img             # original input image
    """

    def __init__(self, MODEL):
        """Load the RKNN model and initialise the NPU runtime.

        MODEL: path to the .rknn model file.
        Exits the process if the platform is not RK3588 or if model
        loading / runtime initialisation fails.
        """
        # NOTE(review): det() later overwrites these with image.shape[0] /
        # image.shape[1], i.e. self.w ends up holding the image HEIGHT and
        # self.h the WIDTH.  The names are kept for backward compatibility
        # with any external readers of these attributes; draw() relies on
        # exactly this convention when rescaling coordinates.
        self.w = 1280
        self.h = 720
        # White placeholder images until the first detection runs.
        self.org_img = np.ones((720, 1280, 3), dtype=np.uint8) * 255
        self.res_img = np.ones((720, 1280, 3), dtype=np.uint8) * 255
        self.det_bboxes = []
        self.OBJ_THRESH = 0.25      # objectness / class-score threshold
        self.NMS_THRESH = 0.45      # IoU threshold for non-max suppression
        self.IMG_SIZE = 640         # square network input resolution
        self.CLASSES = ("ball", "person", "uav", "car", "tank")
        self.DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'
        self.RKNN_MODEL = MODEL
        self.rknn_model = MODEL
        self.host_name = self.get_host()
        if self.host_name != 'RK3588':
            # This demo only supports the RK3588 NPU.
            exit(-1)
        self.rknn_lite = RKNNLite()
        # BUGFIX: check each return code immediately.  The original code
        # overwrote the load_rknn() status with the init_runtime() status
        # before testing it, so a model-load failure was silently masked.
        self.ret = self.rknn_lite.load_rknn(self.rknn_model)
        if self.ret != 0:
            exit(self.ret)
        self.ret = self.rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
        if self.ret != 0:
            exit(self.ret)

    def get_host(self):
        """Return the host identifier (e.g. 'RK3588') for this platform.

        On Linux/aarch64 the device-tree compatible node is inspected to
        distinguish Rockchip SoCs; on any other platform the
        '<system>-<machine>' string is returned.  Exits the process if the
        device node cannot be read.
        """
        os_machine = platform.system() + '-' + platform.machine()
        if os_machine != 'Linux-aarch64':
            return os_machine
        try:
            with open(self.DEVICE_COMPATIBLE_NODE) as f:
                device_compatible_str = f.read()
        except IOError:
            exit(-1)
        if 'rk3588' in device_compatible_str:
            return 'RK3588'
        if 'rk3562' in device_compatible_str:
            return 'RK3562'
        return 'RK3566_RK3568'

    def xywh2xyxy(self, x):
        """Convert boxes from [cx, cy, w, h] to [x1, y1, x2, y2].

        x: ndarray of shape (N, 4+); only the first four columns are
        converted, the rest are copied unchanged.
        """
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2     # top-left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2     # top-left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2     # bottom-right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2     # bottom-right y
        return y

    def process(self, input, mask, anchors):
        """Decode one YOLOv5 output head into candidate boxes.

        input:   head output of shape (grid_h, grid_w, 3, 5 + num_classes)
        mask:    anchor indices used by this head
        anchors: full anchor list [[w, h], ...]

        Returns (boxes_xywh, box_confidence, box_class_probs) with box
        coordinates in IMG_SIZE pixel space.
        """
        anchors = [anchors[i] for i in mask]
        grid_h, grid_w = map(int, input.shape[0:2])
        box_confidence = np.expand_dims(input[..., 4], axis=-1)
        box_class_probs = input[..., 5:]
        # YOLOv5 decoding: xy = (sigmoid(t) * 2 - 0.5 + grid_cell) * stride
        box_xy = input[..., :2] * 2 - 0.5
        # NOTE(review): the tiling below assumes a square grid
        # (grid_h == grid_w), which holds for square IMG_SIZE inputs.
        col = np.tile(np.arange(0, grid_w), grid_w).reshape(-1, grid_w)
        row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_h)
        col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
        row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
        grid = np.concatenate((col, row), axis=-1)
        box_xy += grid
        box_xy *= int(self.IMG_SIZE / grid_h)       # stride for this head
        # wh = (sigmoid(t) * 2)^2 * anchor
        box_wh = pow(input[..., 2:4] * 2, 2) * anchors
        box = np.concatenate((box_xy, box_wh), axis=-1)
        return box, box_confidence, box_class_probs

    def filter_boxes(self, boxes, box_confidences, box_class_probs):
        """Filter boxes with box threshold. It's a bit different with origin yolov5 post process!

        # Arguments
            boxes: ndarray, boxes of objects.
            box_confidences: ndarray, confidences of objects.
            box_class_probs: ndarray, class_probs of objects.

        # Returns
            boxes: ndarray, filtered boxes.
            classes: ndarray, classes for boxes.
            scores: ndarray, scores for boxes (class prob * objectness).
        """
        boxes = boxes.reshape(-1, 4)
        box_confidences = box_confidences.reshape(-1)
        box_class_probs = box_class_probs.reshape(-1, box_class_probs.shape[-1])
        # Pass 1: drop boxes with low objectness.
        _box_pos = np.where(box_confidences >= self.OBJ_THRESH)
        boxes = boxes[_box_pos]
        box_confidences = box_confidences[_box_pos]
        box_class_probs = box_class_probs[_box_pos]
        # Pass 2: drop boxes whose best class probability is also low.
        class_max_score = np.max(box_class_probs, axis=-1)
        classes = np.argmax(box_class_probs, axis=-1)
        _class_pos = np.where(class_max_score >= self.OBJ_THRESH)
        boxes = boxes[_class_pos]
        classes = classes[_class_pos]
        scores = (class_max_score * box_confidences)[_class_pos]
        return boxes, classes, scores

    def nms_boxes(self, boxes, scores):
        """Suppress non-maximal boxes (greedy IoU NMS).

        # Arguments
            boxes: ndarray (N, 4), boxes as [x1, y1, x2, y2].
            scores: ndarray (N,), scores of objects.
        # Returns
            keep: ndarray, indices of the boxes to keep.
        """
        x = boxes[:, 0]
        y = boxes[:, 1]
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]
        areas = w * h
        order = scores.argsort()[::-1]      # highest score first
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Intersection of the best box with every remaining box.
            xx1 = np.maximum(x[i], x[order[1:]])
            yy1 = np.maximum(y[i], y[order[1:]])
            xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
            yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
            w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
            h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
            inter = w1 * h1
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            # Keep only boxes that do not overlap the best one too much.
            inds = np.where(ovr <= self.NMS_THRESH)[0]
            order = order[inds + 1]
        keep = np.array(keep)
        return keep

    def yolov5_post_process(self, input_data):
        """Decode, filter and NMS the three YOLOv5 output heads.

        input_data: list of three head outputs, each shaped
        (grid_h, grid_w, 3, 5 + num_classes).
        Returns (boxes_xyxy, classes, scores), or (None, None, None) if
        nothing survives filtering.
        """
        masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                   [59, 119], [116, 90], [156, 198], [373, 326]]
        boxes, classes, scores = [], [], []
        for input, mask in zip(input_data, masks):
            b, c, s = self.process(input, mask, anchors)
            b, c, s = self.filter_boxes(b, c, s)
            boxes.append(b)
            classes.append(c)
            scores.append(s)
        boxes = np.concatenate(boxes)
        boxes = self.xywh2xyxy(boxes)
        classes = np.concatenate(classes)
        scores = np.concatenate(scores)
        # Per-class NMS.
        nboxes, nclasses, nscores = [], [], []
        for c in set(classes):
            inds = np.where(classes == c)
            b = boxes[inds]
            c = classes[inds]
            s = scores[inds]
            keep = self.nms_boxes(b, s)
            nboxes.append(b[keep])
            nclasses.append(c[keep])
            nscores.append(s[keep])
        if not nclasses and not nscores:
            return None, None, None
        boxes = np.concatenate(nboxes)
        classes = np.concatenate(nclasses)
        scores = np.concatenate(nscores)
        return boxes, classes, scores

    def draw(self, image, boxes, scores, classes):
        """Draw detections on *image* (IMG_SIZE space) and record them.

        # Argument:
            image: IMG_SIZE x IMG_SIZE BGR image to annotate in place.
            boxes: ndarray, [x1, y1, x2, y2] boxes from yolov5_post_process.
            scores: ndarray, scores of objects.
            classes: ndarray, class indices of objects.

        Rebuilds self.det_bboxes with coordinates rescaled to the original
        frame: 'top'/'bottom' are y values scaled by self.w (which holds the
        frame HEIGHT — see det()) and 'left'/'right' are x values scaled by
        self.h (frame width).  Only detections with score > 0.5 are kept.
        Returns the annotated image.
        """
        self.det_bboxes = []
        for box, score, cl in zip(boxes, scores, classes):
            if score <= 0.5:
                continue
            # NOTE(review): the original unpacked these as top/left/right/
            # bottom, but the box layout is really (x1, y1, x2, y2).
            x1, y1, x2, y2 = (int(v) for v in box)
            # BGR colours: dark-green rectangle, pink label text.
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 100, 0), 1)
            cv2.putText(image, '{0} {1:.2f}'.format(self.CLASSES[cl], score),
                        (x1, y1 - 6),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        0.5, (144, 95, 205), 1)
            det_bbox = dict()
            det_bbox['id'] = self.CLASSES[cl]
            det_bbox['score'] = score
            det_bbox['top'] = int(y1 / self.IMG_SIZE * self.w)
            det_bbox['left'] = int(x1 / self.IMG_SIZE * self.h)
            det_bbox['right'] = int(x2 / self.IMG_SIZE * self.h)
            det_bbox['bottom'] = int(y2 / self.IMG_SIZE * self.w)
            self.det_bboxes.append(det_bbox)
        return image

    def det(self, image):
        """Run detection on a BGR image (e.g. cv_bridge imgmsg_to_cv2 output).

        Populates self.org_img (input frame), self.det_bboxes (via draw)
        and self.res_img (annotated 1280x720 copy; unchanged input frame if
        nothing was detected).
        """
        # NOTE(review): image.shape is (rows, cols, channels); the original
        # code named these img_width/img_height, which is swapped.  The
        # attribute assignments are kept identical (self.w = rows = height)
        # because draw() rescales coordinates with exactly this convention.
        rows, cols, _ = image.shape
        self.w = rows
        self.h = cols
        self.org_img = image
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (self.IMG_SIZE, self.IMG_SIZE))
        # Inference on the NPU; model expects a single NHWC batch.
        img2 = np.expand_dims(img, 0)
        outputs = self.rknn_lite.inference(inputs=[img2], data_format=['nhwc'])
        # Reshape each of the three heads to (grid_h, grid_w, 3, 5+classes).
        input_data = list()
        for out in outputs[:3]:
            out = out.reshape([3, -1] + list(out.shape[-2:]))
            input_data.append(np.transpose(out, (2, 3, 0, 1)))
        boxes, classes, scores = self.yolov5_post_process(input_data)
        img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        self.res_img = self.org_img
        if boxes is not None:
            img_1 = self.draw(img_1, boxes, scores, classes)
            # Resize the annotated image back to the original frame size,
            # then to 1280x720 for publishing; cv2.resize takes (w, h).
            img_1 = cv2.resize(img_1, (cols, rows))
            img_2 = cv2.resize(img_1, (1280, 720))
            cv2.waitKey(3)
            self.res_img = img_2

