#!/usr/bin/env python3.6
# coding:utf-8

"""trt_yolo.py

This script demonstrates how to do real-time object detection with
TensorRT optimized YOLO engine.
"""

import os
import sys
import time
import argparse
import numpy as np

import rospy
import rospkg
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped

# sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')

import cv2
import pycuda.autoinit  # This is needed for initializing CUDA driver

from utils.yolo_classes import get_cls_dict
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps, show_tarInfo
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import TrtYOLO

from cv_bridge import CvBridge, CvBridgeError

# sys.path.append('/opt/ros/melodic/lib/python2.7/dist-packages')

WINDOW_NAME = 'TrtYOLO'

def parse_args():
  """Build and parse the command-line arguments for this node."""
  description = ('Capture and display live camera video, while doing '
                 'real-time object detection with TensorRT optimized '
                 'YOLO model on Jetson')
  parser = argparse.ArgumentParser(description=description)
  parser = add_camera_args(parser)
  parser.add_argument('-c', '--category_num', type=int, default=80,
                      help='number of object categories [80]')
  parser.add_argument('-t', '--conf_thresh', type=float, default=0.3,
                      help='set the detection confidence threshold')
  parser.add_argument('-m', '--model', type=str, default='yolov4-tiny-416',
                      help=('[yolov3-tiny|yolov3|yolov3-spp|yolov4-tiny|yolov4|'
                            'yolov4-csp|yolov4x-mish|yolov4-p5]-[{dimension}], where '
                            '{dimension} could be either a single number (e.g. '
                            '288, 416, 608) or 2 numbers, WxH (e.g. 416x256)'))
  parser.add_argument('-l', '--letter_box', action='store_true',
                      help='inference with letterboxed image [False]')
  return parser.parse_args()


def getIOU(box1, box2, wh=False):
  """Compute the IoU (intersection over union) of two boxes.

  Args:
      box1, box2: [xmin, ymin, xmax, ymax] when wh is False, or
          [xcenter, ycenter, w, h] when wh is True.
      wh: coordinate format flag (True means center/width/height).

  Returns:
      iou: IoU of box1 and box2, in [0, 1].
  """
  if not wh:
      xmin1, ymin1, xmax1, ymax1 = box1
      xmin2, ymin2, xmax2, ymax2 = box2
  else:
      # Convert center/width/height to corner coordinates; int() truncation
      # kept from the original to stay on the pixel grid.
      xmin1, ymin1 = int(box1[0]-box1[2]/2.0), int(box1[1]-box1[3]/2.0)
      xmax1, ymax1 = int(box1[0]+box1[2]/2.0), int(box1[1]+box1[3]/2.0)
      xmin2, ymin2 = int(box2[0]-box2[2]/2.0), int(box2[1]-box2[3]/2.0)
      xmax2, ymax2 = int(box2[0]+box2[2]/2.0), int(box2[1]+box2[3]/2.0)

  # Top-left and bottom-right corners of the intersection rectangle.
  # Builtin max/min instead of np.max/np.min: same result for two scalars,
  # without the numpy array round-trip.
  xx1 = max(xmin1, xmin2)
  yy1 = max(ymin1, ymin2)
  xx2 = min(xmax1, xmax2)
  yy2 = min(ymax1, ymax2)

  # Areas of the two boxes.
  area1 = (xmax1-xmin1) * (ymax1-ymin1)
  area2 = (xmax2-xmin2) * (ymax2-ymin2)

  # Intersection area; zero when the boxes do not overlap.
  inter_area = max(0, xx2-xx1) * max(0, yy2-yy1)
  # Epsilon avoids division by zero for degenerate (zero-area) boxes.
  iou = inter_area / (area1+area2-inter_area+1e-6)
  return iou


def getArea(box):
  """Return the area of an [xmin, ymin, xmax, ymax] box."""
  xmin, ymin, xmax, ymax = box
  return (xmax - xmin) * (ymax - ymin)


def getYawDIs(_img, _boxes):
  """Estimate yaw angle (deg) and distance of the tracked person.

  Args:
      _img: BGR image (H x W x C numpy array) the boxes refer to.
      _boxes: list of person boxes; only the first entry
          [xmin, ymin, xmax, ymax] is used.

  Returns:
      (yaw, dis): yaw offset in degrees and estimated distance.
      (0.0, 0.0) is the "no valid detection" sentinel, returned when
      _boxes is empty or the first box has zero/negative height.
  """
  if not _boxes:
    return 0.0, 0.0
  x_min, y_min, x_max, y_max = _boxes[0][0], _boxes[0][1], _boxes[0][2], _boxes[0][3]
  rows, cols, channels = _img.shape
  # Fix: a degenerate box (y_max <= y_min) previously crashed with
  # ZeroDivisionError; treat it as "no detection".
  if y_max <= y_min:
    return 0.0, 0.0
  # TODO: assume fov = 90 deg -- box-centre offset from the image centre is
  # mapped onto [-45, +45] degrees.
  yaw = (1.0 - (x_min + x_max)/cols ) * 45.0
  # Distance from apparent box height; 1.4/0.9*0.9 are tuning constants kept
  # verbatim from the original calibration -- presumably person height and
  # scale factors, TODO confirm.
  dis = rows/(y_max - y_min) * 1.4 / 0.9 * 0.9
  return yaw, dis


class image_converter:
  """ROS glue: subscribes to the camera image topic, runs TRT-YOLO person
  detection on each frame, and publishes the estimated target info plus an
  annotated image.
  """

  def __init__(self, trt_yolo, conf_thresh, vis):
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/MVcam/image", Image, self.callback)
    self.info_pub = rospy.Publisher("/robot/estimate/info", PointStamped, queue_size=1)
    self.image_pub = rospy.Publisher("/robot/image", Image, queue_size=1)
    self.trt_yolo = trt_yolo        # TRT YOLO detector instance
    self.conf_thresh = conf_thresh  # detection confidence threshold
    self.vis = vis                  # bounding-box visualization helper
    # Last selected person box [xmin, ymin, xmax, ymax]; [] when none.
    # NOTE(review): 'preson' spelling kept for backward compatibility.
    self.last_preson = []

  def filter_class(self, _boxes, _confs, _clss):
    """Keep only person detections (class 0) and move the tracked target
    to index 0.

    Selection rule when several persons are visible: with no tracking
    history pick the largest-area box, otherwise pick the box with the
    highest IoU against the previously selected box.

    Returns:
        (boxes, confs): person boxes and confidences, chosen target first.
    """
    boxes = []
    confs = []
    for bb, cf, cl in zip(_boxes, _confs, _clss):
      if cl == 0:  # class 0 == person
        boxes.append(bb)
        confs.append(cf)

    count = len(boxes)
    if count > 1:
      if self.last_preson == []:
        rospy.loginfo_throttle(3,"The num of persons in FOV > 1, choose the person whose area is max")
        # No history: track the person with the largest box area.
        # max() keeps the first index on ties, matching the strict-'>' scan.
        best = max(range(count), key=lambda i: getArea(boxes[i]))
      else:
        rospy.loginfo_throttle(3,"The num of persons in FOV > 1, choose the person whose IOU with last result is max")
        # History available: track the person overlapping the last result most.
        best = max(range(count), key=lambda i: getIOU(boxes[i], self.last_preson))
      boxes[0], boxes[best] = boxes[best], boxes[0]
      confs[0], confs[best] = confs[best], confs[0]
      self.last_preson = boxes[0]
    elif count == 0:
      print("No person detected")
      self.last_preson = []
    else:
      self.last_preson = boxes[0]

    return boxes, confs

  def loop_and_detect(self, _img, trt_yolo, conf_th, vis):
    """Run one detection pass on a single frame (despite the legacy name,
    this does NOT loop -- it is driven per-frame by the ROS callback).

    Args:
        _img: BGR frame to process.
        trt_yolo: the TRT YOLO object detector instance.
        conf_th: confidence/score threshold for object detection.
        vis: visualization helper used to draw the results.

    Returns:
        (yaw, dis, img): target yaw/distance estimate and annotated frame.
    """
    tic = time.time()
    img = _img
    boxes, confs, clss = trt_yolo.detect(img, conf_th)
    # FPS measured over the detector call only.
    fps = 1/(time.time() - tic)

    boxes, confs = self.filter_class(boxes, confs, clss)
    yaw, dis = getYawDIs(img, boxes)

    img = vis.draw_preson_bboxes(img, boxes, confs)
    img = show_fps(img, fps)
    img = show_tarInfo(img, yaw, dis)
    return yaw, dis, img

  def callback(self, data):
    """Image-topic callback: convert, detect, and publish the results."""
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
      # Fix: previously fell through with cv_image undefined (NameError);
      # skip this frame on conversion failure.
      print(e)
      return

    y, d, tar_image = self.loop_and_detect(cv_image, self.trt_yolo, self.conf_thresh, self.vis)

    # dis == 0.0 is the "no valid detection" sentinel; publish nothing then.
    if d != 0.0:
      info_msg = PointStamped()
      info_msg.point.x = y  # yaw (deg) packed into point.x
      info_msg.point.y = d  # distance packed into point.y
      self.info_pub.publish(info_msg)
      ros_image = self.bridge.cv2_to_imgmsg(tar_image, "bgr8")
      self.image_pub.publish(ros_image)


def main():
  """Node entry point: init ROS, load the TRT engine, spin until shutdown."""
  rospy.init_node("trt_yolo")

  args = parse_args()
  if args.category_num <= 0:
      raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)

  # The .trt engine is expected next to this script under yolo/.
  engine_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)),
      'yolo', '{}.trt'.format(args.model))
  if not os.path.isfile(engine_path):
      raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)

  # Build the visualization helper and the detector, then wire them into
  # the ROS image pipeline.
  vis = BBoxVisualization(get_cls_dict(args.category_num))
  trt_yolo = TrtYOLO(args.model, args.category_num, args.letter_box)
  ic = image_converter(trt_yolo, args.conf_thresh, vis)

  open_window(WINDOW_NAME, 'Camera TensorRT YOLO Demo', 1280, 1024)

  try:
    rospy.spin()
  except KeyboardInterrupt:
    print("Shutting down")

  cv2.destroyAllWindows()


if __name__ == '__main__':
  main()
