#!/usr/bin/env python3

import rospy
import numpy as np
import os,cv2
import math
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CompressedImage
from vision_msgs.msg import BoundingBox2D, Detection2DArray,ObjectHypothesisWithPose,Detection2D
from hide_warnings import hide_warnings


class TrafficDetect():
    """YOLOv5 object-detection ROS node backed by an RKNN NPU runtime.

    Subscribes to a camera image topic, letterboxes each frame to 640x640,
    runs YOLOv5 inference through RKNNLite, and publishes:
      * the annotated image on /object_image (raw or compressed), and
      * a vision_msgs/Detection2DArray on /rikibot_detect_node/detections.

    NOTE: __init__ blocks in the processing loop until ROS shutdown.
    """

    def __init__(self):
        # ROS parameters: processing rate (Hz) and camera image topic.
        self.rate = rospy.get_param('~rate', 10)
        self.camera_topic = rospy.get_param('~camera_topic', '/camera/rgb/image_raw/')
        r = rospy.Rate(self.rate)
        rospy.on_shutdown(self.shutdown)

        # COCO class names indexed by model class id.
        # NOTE(review): several entries carry trailing/padded spaces; they are
        # kept verbatim because they are published as-is in
        # Detection2D.header.frame_id and drawn on the image.
        self.classes_name = ("person", "bicycle", "car", "motorbike ", "aeroplane ", "bus ", "train", "truck ", "boat", "traffic light",
           "fire hydrant", "stop sign ", "parking meter", "bench", "bird", "cat", "dog ", "horse ", "sheep", "cow",
           "elephant",
           "bear", "zebra ", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
           "snowboard", "sports ball", "kite",
           "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
           "fork", "knife ",
           "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza ", "donut",
           "cake", "chair", "sofa",
           "pottedplant", "bed", "diningtable", "toilet ", "tvmonitor", "laptop ", "mouse       ", "remote ",
           "keyboard ", "cell phone", "microwave ",
           "oven ", "toaster", "sink", "refrigerator ", "book", "clock", "vase", "scissors ", "teddy bear ",
           "hair drier", "toothbrush ")

        # Transport type for the subscribed camera images and the published
        # annotated images ("raw" or "compressed").
        self.sub_image_type = "raw"
        self.pub_type = "raw"
        self.cv_image = None  # latest BGR frame delivered by ImageCallback

        # Detection thresholds and the model's fixed input resolution.
        self.obj_thresh = 0.25  # min (objectness * class confidence) to keep a box
        self.nms_thresh = 0.45  # IoU threshold for per-class NMS
        self.img_size = (640, 640)

        # The .rknn model lives under <package>/conf, next to this node's dir.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        dir_path = dir_path.replace('rikibot_object_rknn/node', 'rikibot_object_rknn/')
        self.rknn_model = dir_path + 'conf/yolov5s.rknn'

        if self.sub_image_type == "compressed":
            self.sub_image_original = rospy.Subscriber(self.camera_topic+'compressed', CompressedImage, self.ImageCallback, queue_size = 1)
        elif self.sub_image_type == "raw":
            self.sub_image_original = rospy.Subscriber(self.camera_topic, Image, self.ImageCallback, queue_size = 1)

        if self.pub_type == "compressed":
            self.pub_image = rospy.Publisher('/object_image/compressed', CompressedImage, queue_size=1)
        else:
            self.pub_image = rospy.Publisher('/object_image', Image, queue_size=1)

        self.det_pub = rospy.Publisher("/rikibot_detect_node/detections", Detection2DArray, queue_size=10)

        self.cvBridge = CvBridge()

        # Imported lazily so the file can at least be parsed on hosts that
        # don't have the Rockchip NPU runtime installed.
        from rknnlite.api import RKNNLite
        self.rknn_lite = RKNNLite()
        self.rknn_lite.load_rknn(self.rknn_model)
        self.rknn_lite.init_runtime()

        # Main processing loop: detect on the newest frame at self.rate Hz.
        while not rospy.is_shutdown():
            if self.cv_image is not None:
                img, ratio, (dw, dh) = self.letterbox(self.cv_image, new_shape=(640, 640))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                outputs = self.inference(img)
                boxes, classes, scores = self.yolov5_post_process(outputs)
                img_orig = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                if boxes is not None:
                    self.draw(img_orig, boxes, scores, classes)

                if self.pub_type == "compressed":
                    # publish the annotated image in compressed form
                    self.pub_image.publish(self.cvBridge.cv2_to_compressed_imgmsg(img_orig, "jpg"))
                elif self.pub_type == "raw":
                    # publish the annotated image in raw form
                    self.pub_image.publish(self.cvBridge.cv2_to_imgmsg(img_orig, "bgr8"))
            r.sleep()

    def ImageCallback(self, image_msg):
        """Cache the most recent camera frame as a BGR OpenCV image."""
        if self.sub_image_type == "compressed":
            # np.frombuffer replaces the deprecated np.fromstring, which was
            # removed for binary input in NumPy 2.0.
            np_arr = np.frombuffer(image_msg.data, np.uint8)
            self.cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        elif self.sub_image_type == "raw":
            self.cv_image = self.cvBridge.imgmsg_to_cv2(image_msg, "bgr8")

    @hide_warnings
    def inference(self, image):
        """Run the RKNN model on one RGB frame; returns the raw output tensors."""
        return self.rknn_lite.inference(inputs=[image])

    def filter_boxes(self, boxes, box_confidences, box_class_probs):
        """Filter boxes with the object threshold.

        Keeps boxes whose (objectness * best class probability) is at least
        self.obj_thresh.

        Returns:
            (boxes, classes, scores) for the surviving candidates.
        """
        box_confidences = box_confidences.reshape(-1)

        class_max_score = np.max(box_class_probs, axis=-1)
        classes = np.argmax(box_class_probs, axis=-1)

        _class_pos = np.where(class_max_score * box_confidences >= self.obj_thresh)
        scores = (class_max_score * box_confidences)[_class_pos]

        boxes = boxes[_class_pos]
        classes = classes[_class_pos]

        return boxes, classes, scores

    def nms_boxes(self, boxes, scores):
        """Suppress non-maximal boxes (greedy IoU-based NMS).

        boxes are [x1, y1, x2, y2].

        # Returns
            keep: ndarray, index of effective boxes.
        """
        x = boxes[:, 0]
        y = boxes[:, 1]
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]

        areas = w * h
        order = scores.argsort()[::-1]  # highest score first

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)

            # Intersection of the best box with all remaining boxes.
            xx1 = np.maximum(x[i], x[order[1:]])
            yy1 = np.maximum(y[i], y[order[1:]])
            xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
            yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])

            # Small epsilon keeps touching boxes from producing zero overlap.
            w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
            h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
            inter = w1 * h1

            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= self.nms_thresh)[0]
            order = order[inds + 1]  # +1: inds is relative to order[1:]
        keep = np.array(keep)
        return keep

    def box_process(self, position, anchors):
        """Decode one YOLOv5 output head into [x1, y1, x2, y2] boxes.

        position: raw xywh predictions shaped (num_anchors, 4, grid_h, grid_w).
        anchors:  the (w, h) anchor pairs for this head.
        """
        grid_h, grid_w = position.shape[2:4]
        col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
        col = col.reshape(1, 1, grid_h, grid_w)
        row = row.reshape(1, 1, grid_h, grid_w)
        grid = np.concatenate((col, row), axis=1)
        # Pixels per grid cell for this head; broadcasts over all anchors.
        stride = np.array([self.img_size[1]//grid_h, self.img_size[0]//grid_w]).reshape(1, 2, 1, 1)

        anchors = np.array(anchors)
        anchors = anchors.reshape(*anchors.shape, 1, 1)

        # YOLOv5 decode: xy = (p*2 - 0.5 + grid) * stride, wh = (p*2)^2 * anchor.
        box_xy = position[:, :2, :, :]*2 - 0.5
        box_wh = pow(position[:, 2:4, :, :]*2, 2) * anchors

        box_xy += grid
        box_xy *= stride
        box = np.concatenate((box_xy, box_wh), axis=1)

        # Convert [c_x, c_y, w, h] to [x1, y1, x2, y2]
        xyxy = np.copy(box)
        xyxy[:, 0, :, :] = box[:, 0, :, :] - box[:, 2, :, :] / 2  # top left x
        xyxy[:, 1, :, :] = box[:, 1, :, :] - box[:, 3, :, :] / 2  # top left y
        xyxy[:, 2, :, :] = box[:, 0, :, :] + box[:, 2, :, :] / 2  # bottom right x
        xyxy[:, 3, :, :] = box[:, 1, :, :] + box[:, 3, :, :] / 2  # bottom right y

        return xyxy

    def yolov5_post_process(self, input_data):
        """Decode, threshold, and NMS the three YOLOv5 output heads.

        Returns (boxes, classes, scores), or (None, None, None) when nothing
        survives the confidence threshold.
        """
        anchors = [[[10.0, 13.0], [16.0, 30.0], [33.0, 23.0]], [[30.0, 61.0],
                 [62.0, 45.0], [59.0, 119.0]], [[116.0, 90.0], [156.0, 198.0], [373.0, 326.0]]]

        boxes, scores, classes_conf = [], [], []
        # 1*255*h*w -> 3*85*h*w (one slice per anchor: 4 box + 1 obj + 80 cls)
        input_data = [_in.reshape([len(anchors[0]), -1] + list(_in.shape[-2:])) for _in in input_data]
        for i in range(len(input_data)):
            boxes.append(self.box_process(input_data[i][:, :4, :, :], anchors[i]))
            scores.append(input_data[i][:, 4:5, :, :])
            classes_conf.append(input_data[i][:, 5:, :, :])

        def sp_flatten(_in):
            # (a, ch, h, w) -> (a*h*w, ch)
            ch = _in.shape[1]
            _in = _in.transpose(0, 2, 3, 1)
            return _in.reshape(-1, ch)

        boxes = [sp_flatten(_v) for _v in boxes]
        classes_conf = [sp_flatten(_v) for _v in classes_conf]
        scores = [sp_flatten(_v) for _v in scores]

        boxes = np.concatenate(boxes)
        classes_conf = np.concatenate(classes_conf)
        scores = np.concatenate(scores)

        # filter according to threshold
        boxes, classes, scores = self.filter_boxes(boxes, scores, classes_conf)

        # per-class NMS
        nboxes, nclasses, nscores = [], [], []
        for c in set(classes):
            inds = np.where(classes == c)
            b = boxes[inds]
            cls = classes[inds]
            s = scores[inds]
            keep = self.nms_boxes(b, s)

            nboxes.append(b[keep])
            nclasses.append(cls[keep])
            nscores.append(s[keep])

        if not nclasses and not nscores:
            return None, None, None

        boxes = np.concatenate(nboxes)
        classes = np.concatenate(nclasses)
        scores = np.concatenate(nscores)
        return boxes, classes, scores

    def draw(self, image, boxes, scores, classes):
        """Draw detections onto *image* (in place) and publish Detection2DArray."""
        object_msg = Detection2DArray()
        for box, score, cl in zip(boxes, scores, classes):
            # box is [x1, y1, x2, y2] in letterboxed-image pixel coordinates.
            left, top, right, bottom = [int(_b) for _b in box]
            print('class: {}, score: {}'.format(self.classes_name[cl], score))
            print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(left, top, right, bottom))

            cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 2)
            cv2.putText(image, '{0} {1:.2f}'.format(self.classes_name[cl], score),
                        (left, top - 6),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, (0, 0, 255), 2)

            detmsg = Detection2D()
            detmsg.bbox.size_x = int(right - left)
            detmsg.bbox.size_y = int(bottom - top)
            detmsg.bbox.center.x = int(left + detmsg.bbox.size_x/2)
            detmsg.bbox.center.y = int(top + detmsg.bbox.size_y/2)
            detmsg.header.frame_id = self.classes_name[cl]

            hyp = ObjectHypothesisWithPose()
            hyp.id = cl
            hyp.score = score
            detmsg.results.append(hyp)
            object_msg.detections.append(detmsg)
        self.det_pub.publish(object_msg)

    def letterbox(self, im, new_shape=(640, 640), color=(0, 0, 0)):
        """Resize *im* to fit new_shape while preserving aspect ratio, padding
        the remainder with *color*.

        Returns (padded image, (ratio_w, ratio_h), (pad_w, pad_h)) so callers
        can map detections back to the original frame.
        """
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        # +/-0.1 rounding splits an odd pad pixel deterministically.
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return im, ratio, (dw, dh)

    def shutdown(self):
        """Release the RKNN runtime before the node exits."""
        self.rknn_lite.release()
        rospy.logwarn("now will shutdown detect node ...")
if __name__ == '__main__':
    # Bring the node up; TrafficDetect's constructor runs the detection loop,
    # and spin() keeps callbacks alive until shutdown.
    rospy.init_node('rikibot_object_node', log_level=rospy.INFO)
    node = TrafficDetect()
    rospy.spin()

