#!/usr/bin/env python
# coding:utf-8

import argparse
import rospy
import cv2
import numpy as np
import paddle
from paddle.inference import Config
from paddle.inference import create_predictor
from paddle3d.ops.iou3d_nms import nms_gpu
from vis_utils import preprocess, Calibration, compute_box_3d, show_bev_with_boxes, show_image_with_boxes
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from sensor_msgs.msg import PointCloud2
from std_msgs.msg import Header
import sensor_msgs.point_cloud2 as pcl2
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
from tf.transformations import quaternion_from_euler
import yolo3_inference
from std_msgs.msg import Float32
import random
import pickle
import time


def parse_args():
    """Build and parse command-line arguments for the inference node.

    Returns:
        argparse.Namespace with model paths, sensor data paths, voxelization
        settings, TensorRT options and visualization thresholds.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_file",
        type=str,
        help="Model filename, Specify this when your model is a combined model.",
        default=None)
    parser.add_argument(
        "--params_file",
        type=str,
        help=
        "Parameter filename, Specify this when your model is a combined model.",
        default=None)
    parser.add_argument(
        '--lidar_file', type=str, help='The lidar path.', default=None)
    parser.add_argument(
        '--radar_dir', type=str, help='The radar path.', default=None)
    # Fixed copy-pasted help text: this is the image path, not the lidar path.
    parser.add_argument(
        '--image_dir', type=str, help='The image path.', default=None)
    parser.add_argument(
        '--infrared_dir', type=str, help='The infrared image path.', default=None)
    # Fixed copy-pasted help text: this is the calibration file path.
    parser.add_argument(
        '--calib_file', type=str, help='The calibration file path.', default=None)
    parser.add_argument(
        "--num_point_dim",
        type=int,
        default=4,
        help="Dimension of a point in the lidar file.")
    parser.add_argument(
        "--point_cloud_range",
        dest='point_cloud_range',
        nargs='+',
        help="Range of point cloud for voxelize operation.",
        type=float,
        default=None)
    parser.add_argument(
        "--voxel_size",
        dest='voxel_size',
        nargs='+',
        help="Size of voxels for voxelize operation.",
        type=float,
        default=None)
    parser.add_argument(
        "--max_points_in_voxel",
        type=int,
        default=100,
        help="Maximum number of points in a voxel.")
    parser.add_argument(
        "--score_thr",
        type=float,
        default=0.5,
        help="Score threshold for visualization.")
    parser.add_argument(
        "--max_voxel_num",
        type=int,
        default=12000,
        help="Maximum number of voxels.")
    parser.add_argument(
        "--iou_thr",
        type=float,
        default=0.1,
        help="IoU threshold for Fusion.")
    parser.add_argument(
        "--use_trt",
        type=int,
        default=0,
        help="Whether to use tensorrt to accelerate when using gpu.")
    parser.add_argument(
        "--trt_precision",
        type=int,
        default=0,
        help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.")
    parser.add_argument(
        "--trt_use_static",
        type=int,
        default=0,
        help="Whether to load the tensorrt graph optimization from a disk path."
    )
    parser.add_argument(
        "--trt_static_dir",
        type=str,
        help="Path of a tensorrt graph optimization directory.")
    parser.add_argument(
        "--collect_shape_info",
        type=int,
        default=0,
        help="Whether to collect dynamic shape before using tensorrt.")
    parser.add_argument(
        "--dynamic_shape_file",
        type=str,
        default="",
        help="Path of a dynamic shape file for tensorrt.")
    parser.add_argument(
        "--infer_object_file",
        type=str,
        default="",
        help="Path of a inference file.")
    parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.")

    return parser.parse_args()


def init_predictor(model_file,
                   params_file,
                   gpu_id=0,
                   use_trt=False,
                   trt_precision=0,
                   trt_use_static=False,
                   trt_static_dir=None,
                   collect_shape_info=False,
                   dynamic_shape_file=None):
    """Create a Paddle inference predictor, optionally with TensorRT enabled.

    Args:
        model_file: path to the combined model file.
        params_file: path to the combined parameter file.
        gpu_id: GPU card id to run on.
        use_trt: enable the TensorRT subgraph engine when truthy.
        trt_precision: 0 -> kFloat32, 1 -> kHalf.
        trt_use_static: load/store the serialized TensorRT optimization.
        trt_static_dir: cache directory used when trt_use_static is set.
        collect_shape_info: collect dynamic shape ranges instead of loading them.
        dynamic_shape_file: file holding the collected dynamic shape ranges.
    """
    cfg = Config(model_file, params_file)
    cfg.enable_memory_optim()
    # 1000 MB initial GPU memory pool on the selected card.
    cfg.enable_use_gpu(1000, gpu_id)

    if use_trt:
        if trt_precision == 1:
            precision = paddle.inference.PrecisionType.Half
        else:
            precision = paddle.inference.PrecisionType.Float32
        cfg.enable_tensorrt_engine(
            workspace_size=1 << 30,
            max_batch_size=1,
            min_subgraph_size=10,
            precision_mode=precision,
            use_static=trt_use_static,
            use_calib_mode=False)
        # Either record dynamic shapes for a later run, or use the tuned ones.
        if collect_shape_info:
            cfg.collect_shape_range_info(dynamic_shape_file)
        else:
            cfg.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True)
        if trt_use_static:
            cfg.set_optim_cache_dir(trt_static_dir)

    return create_predictor(cfg)


def run(predictor, voxels, coords, num_points_per_voxel):
    """Run one forward pass of the 3D detector.

    Args:
        predictor: a paddle.inference predictor.
        voxels, coords, num_points_per_voxel: voxelized point-cloud arrays.

    Returns:
        Tuple (box3d_lidar, label_preds, scores) copied back to host memory.
    """
    feeds = {
        "voxels": voxels,
        "coords": coords,
        "num_points_per_voxel": num_points_per_voxel,
    }
    # Bind each recognized input tensor by name.
    for name in predictor.get_input_names():
        handle = predictor.get_input_handle(name)
        data = feeds.get(name)
        if data is not None:
            handle.reshape(data.shape)
            handle.copy_from_cpu(data.copy())

    # do the inference
    predictor.run()

    # The first three outputs are boxes, labels and scores, in that order.
    fetched = {}
    for idx, name in enumerate(predictor.get_output_names()):
        handle = predictor.get_output_handle(name)
        if idx < 3:
            fetched[idx] = handle.copy_to_cpu()
    return fetched[0], fetched[1], fetched[2]


def publish_img(img_pub, img):
    """Convert a BGR OpenCV image to a ROS Image message and publish it."""
    bridge = CvBridge()
    img_pub.publish(bridge.cv2_to_imgmsg(img, "bgr8"))


def publish_pcl(pcl_pub, pcl_data):
    """Publish the xyz columns of a point array as a PointCloud2 in frame 'map'."""
    hdr = Header()
    hdr.stamp = rospy.Time.now()
    hdr.frame_id = 'map'
    cloud_msg = pcl2.create_cloud_xyz32(hdr, pcl_data[:, :3])
    pcl_pub.publish(cloud_msg)


def publish_speed(speed_pub, speed_car):
    """Forward the current vehicle speed to the Float32 speed publisher."""
    speed_pub.publish(speed_car)


def publish_boxes(pub_arr_bbox, box3d_lidar, score_thr=0.3, box_scores=None):
    """Publish 3D detections as a jsk BoundingBoxArray in frame 'map'.

    Args:
        pub_arr_bbox: ROS publisher for BoundingBoxArray messages.
        box3d_lidar: (N, 7) array of boxes laid out as (x, y, z, w, l, h, rot).
        score_thr: boxes whose score is <= score_thr are skipped.
        box_scores: per-box confidence scores. When None, falls back to the
            module-level global ``scores`` (the original implementation read
            that global implicitly; the fallback keeps old call sites working).
    """
    if box_scores is None:
        # NOTE(review): legacy behavior — the global ``scores`` is defined in
        # the __main__ section of this script; calling this function without
        # box_scores from anywhere else raises NameError.
        box_scores = scores

    arr_bbox = BoundingBoxArray()
    arr_bbox.header.frame_id = "map"

    num_bbox3d, bbox3d_dims = box3d_lidar.shape
    for box_idx in range(num_bbox3d):
        # filter fake results: score = -1
        if box_scores[box_idx] <= score_thr:
            continue

        bbox = BoundingBox()
        bbox.header.frame_id = "map"

        bbox_raw = box3d_lidar[box_idx]
        x, y, z, w, l, h, rot = bbox_raw

        bbox.pose.position.x = float(x)
        bbox.pose.position.y = float(y)
        bbox.pose.position.z = float(z)

        # Yaw is negated when converting lidar heading to the quaternion
        # convention used by the jsk BoundingBox display.
        q = quaternion_from_euler(0, 0, -rot)
        bbox.pose.orientation.x = q[0]
        bbox.pose.orientation.y = q[1]
        bbox.pose.orientation.z = q[2]
        bbox.pose.orientation.w = q[3]

        bbox.dimensions.x = float(w)
        bbox.dimensions.y = float(l)
        bbox.dimensions.z = float(h)
        arr_bbox.boxes.append(bbox)

    # Publish even when the array is empty (the emptiness guard was
    # deliberately commented out in the original code).
    pub_arr_bbox.publish(arr_bbox)
    # arr_bbox.boxes.clear()


# Compute IoU between the projected 3D box and each 2D detection, consuming
# the first 2D box that matches the class and clears the IoU threshold.
def iou_filter(box1_list, box2, label, iou_threshold):
    """Match one projected 3D box against a list of 2D detections.

    Args:
        box1_list: 2D detections, one row per box:
            (class_id, score, x1, y1, x2, y2).
        box2: projected 3D box as (x1, y1, x2, y2).
        label: 3D class label; 0 must pair with 2D class 10, 1 with class 11.
        iou_threshold: minimum IoU for a match.

    Returns:
        (remaining_boxes, matched): box1_list with the matched row removed
        (unchanged when no match), and whether a match was found.
    """
    for idx, det in enumerate(box1_list):
        # Ignore low-confidence 2D detections.
        if det[1] < 0.3:
            continue
        # Class correspondence: 3D label 0 <-> 2D class 10, 1 <-> 11.
        if label == 0 and det[0] != 10:
            continue
        if label == 1 and det[0] != 11:
            continue

        # Intersection rectangle, clamped to zero extent when disjoint.
        top_left = np.maximum(det[2:4], box2[:2])
        bottom_right = np.minimum(det[4:], box2[2:])
        extent = np.maximum(bottom_right - top_left, 0)
        intersection = extent[0] * extent[1]

        # Areas of the projected 3D box and of the 2D detection.
        area_3d = (box2[2] - box2[0]) * (box2[3] - box2[1])
        area_2d = (det[4] - det[2]) * (det[5] - det[3])
        union = area_3d + area_2d - intersection

        if intersection / union > iou_threshold:
            # Remove the matched 2D box so it cannot be matched twice.
            return np.delete(box1_list, idx, axis=0), True
    return box1_list, False


# Late fusion of 3D lidar detections with 2D image detections.
def fusion_filter(image, box2d_image, box3d_lidar, label_preds, scores, score_thr, iou_thr):
    """Keep 3D boxes that are confirmed by a 2D detection or by a high score.

    A 3D box is kept when (a) its image projection falls partly outside the
    frame but its own score exceeds score_thr, (b) it overlaps a class-matching
    2D detection above iou_thr, or (c) its score alone exceeds score_thr.

    NOTE(review): this function reads a module-level ``calib`` (Calibration)
    object that is not passed as a parameter — confirm it is defined before
    calling (the call site in this script is currently commented out).
    """
    kept = []
    remaining_2d = box2d_image
    # image.shape is (rows, cols, channels) -> (height, width, channels).
    img_h, img_w, _ = image.shape
    for idx in range(len(box3d_lidar)):
        obj = box3d_lidar[idx]
        # Project the 3D box corners from lidar coordinates into the image.
        corners_velo = compute_box_3d(obj)
        corners_img = calib.project_velo_to_image(corners_velo)
        xmin, ymin = np.min(corners_img, axis=0)
        xmax, ymax = np.max(corners_img, axis=0)
        # Boxes whose projection leaves the image cannot be confirmed by the
        # 2D detector; keep them only on the strength of their own score.
        if min([xmin, ymin]) < 0 or xmax > img_w or ymax > img_h:
            if scores[idx] > score_thr:
                kept.append(idx)
            continue
        projected = np.array((xmin, ymin, xmax, ymax))
        remaining_2d, matched = iou_filter(remaining_2d, projected, label_preds[idx], iou_thr)
        if matched:
            kept.append(idx)
        elif scores[idx] > score_thr:
            kept.append(idx)
    if len(kept) > 0:
        return box3d_lidar[kept], label_preds[kept], scores[kept]
    # Nothing survived: return a single all-zero placeholder detection.
    return np.zeros((1, 7)), np.zeros((1, 1)), np.zeros((1, 1))


if __name__ == '__main__':
    # Main loop: voxelize each lidar frame, run the Paddle 3D detector, and
    # publish the raw point cloud plus detected boxes to ROS for RViz.
    # NOTE(review): ``box3d_lidar``, ``scores`` and ``pc_velo`` assigned here
    # are read as globals by publish_boxes() and by the trailing while-loop;
    # if lidar_file_list turns out empty they stay unbound and the final loop
    # raises NameError — confirm inputs before deployment.
    # file_3d = open("/output/huailai_load2_che.txt", "w")
    # create ros_node
    rospy.init_node("inference_show")

    # create ros_publisher
    pub_cam = rospy.Publisher("cam_image", Image, queue_size=10)
    pub_bev = rospy.Publisher("bev", Image, queue_size=10)
    pub_infrared = rospy.Publisher("infrared_image", Image, queue_size=10)
    pub_pc = rospy.Publisher("lidar", PointCloud2, queue_size=10)
    pub_radar = rospy.Publisher("radar", PointCloud2, queue_size=10)
    pub_arr_bbox = rospy.Publisher("kitti_bbox", BoundingBoxArray, queue_size=10)
    pub_speed = rospy.Publisher("speed", Float32, queue_size=10)
    rate = rospy.Rate(1.5)

    # inference process
    args = parse_args()
    predictor = init_predictor(args.model_file, args.params_file, args.gpu_id,
                               args.use_trt, args.trt_precision,
                               args.trt_use_static, args.trt_static_dir,
                               args.collect_shape_info, args.dynamic_shape_file)
    # predictor2 = yolo3_inference.init_predictor("/work/det2d_model/model.pdmodel",
    #                                             "/output/det2d_model/model.pdiparams")

    from pathlib import Path

    # Each *_file/_dir argument may be a single file or a directory to glob;
    # when absent or a single file, it is broadcast to the lidar list length
    # so the zip below stays aligned.
    if Path(args.lidar_file).is_file():
        lidar_file_list = [args.lidar_file]
    else:
        lidar_file_list = list(Path(args.lidar_file).glob('*.bin'))
        lidar_file_list.sort()
    if args.image_dir is None or Path(args.image_dir).is_file():
        image_file_list = [args.image_dir] * len(lidar_file_list)
    else:
        image_file_list = list(Path(args.image_dir).glob('*.png'))
        image_file_list.sort()
    if args.radar_dir is None or Path(args.radar_dir).is_file():
        radar_file_list = [args.radar_dir] * len(lidar_file_list)
    else:
        radar_file_list = list(Path(args.radar_dir).glob('*.bin'))
        radar_file_list.sort()
    if args.infrared_dir is None or Path(args.infrared_dir).is_file():
        infrared_file_list = [args.infrared_dir] * len(lidar_file_list)
    else:
        infrared_file_list = list(Path(args.infrared_dir).glob('*.png'))
        infrared_file_list.sort()

    # car_idx = 0
    # ped_idx = 0
    # count = 0
    # R = np.array([[0.996195, -0.0871557, 0],
    #              [0.0871557, 0.996195, 0],
    #              [0, 0, 1]])
    # P = np.array([3.8, 0, 0.5])
    # R = np.linalg.inv(R.T)
    #
    # T1 = time.time()
    for lidar_file, image_file, radar_file, infrared_file in zip(lidar_file_list, image_file_list, radar_file_list,
                                                                 infrared_file_list):
        print(lidar_file, image_file)
        # Voxelize the raw lidar frame into the detector's input tensors.
        voxels, coords, num_points_per_voxel = preprocess(
            lidar_file, args.num_point_dim, args.point_cloud_range,
            args.voxel_size, args.max_points_in_voxel, args.max_voxel_num)

        # Skip inference on (near-)empty frames; publish zero placeholders
        # instead so downstream topics keep receiving messages.
        if len(voxels) > 1:
            box3d_lidar, label_preds, scores = run(predictor, voxels, coords,
                                                   num_points_per_voxel)
        else:
            box3d_lidar = np.zeros((1, 7))
            label_preds = np.zeros((1, 1))
            scores = np.zeros((1, 1))

        # count += 1
        # if count > 100:
        #     print("*********************")
        #     print(f"per-frame runtime of the person/vehicle fusion model: {(time.time()-T1)/count} s")
        #     break
        # box3d = []
        # score3d = []
        # with open("/output/1102_car_night_3d.txt", "r") as file_car:
        #     data = file_car.readlines()
        #     for line in data[car_idx:]:
        #         if line != '\n':
        #             bbox_car = line.split(' ')
        #             bbox_car = list(map(float, bbox_car))
        #             box3d.append(bbox_car[1:])
        #             score3d.append(bbox_car[0])
        #             car_idx += 1
        #             continue
        #         car_idx += 1
        #         box3d_car = np.array(box3d)
        #         scores_car = np.array(score3d)
        #         break

        # with open("/output/1102_ped_night_3d.txt", "r") as file_ped:
        #     data = file_ped.readlines()
        #     for line in data[ped_idx:]:
        #         if line != '\n':
        #             bbox_ped = line.split(' ')
        #             bbox_ped = list(map(float, bbox_ped))
        #             box3d.append(bbox_ped[1:])
        #             score3d.append(bbox_ped[0])
        #             ped_idx += 1
        #             continue
        #         ped_idx += 1
        #         box3d_ped = np.array(box3d)
        #         scores_ped = np.array(score3d)
        #         break
        # box3d_lidar = np.concatenate((box3d_car, box3d_ped), axis=0)
        # scores = np.concatenate((scores_car, scores_ped), axis=0)
        # for box_3d_str, scores_str in zip(box3d_lidar, scores):
        #     box_3d_str = ' '.join(map(str, box_3d_str))
        #     scores_str = str(scores_str)
        #     file_3d.write(scores_str + ' ' + box_3d_str + '\n')
        # file_3d.write('\n')

        # Reload the raw cloud for publishing; assumes 4 floats per point
        # (x, y, z, intensity) — matches the --num_point_dim default.
        scan = np.fromfile(lidar_file, dtype=np.float32)
        # # radar_scan = np.fromfile(radar_file, dtype=np.float32)
        pc_velo = scan.reshape((-1, 4))
        # # radar_velo = radar_scan.reshape((-1, 4))
        # # radar_velo = radar_velo[:, :4]
        # # radar_velo[:, :3] = np.dot((radar_velo[:, :3] - P), R)
        # # Obtain calibration information about Kitti
        # calib = Calibration(args.calib_file)
        # # Plot box in bev
        # bev_im = show_bev_with_boxes(pc_velo, box3d_lidar, scores, args.score_thr)
        # cam_im = cv2.imread(str(image_file))
        # # infrared_im = cv2.imread(str(infrared_file))
        # box2d = yolo3_inference.yolo_inference(predictor2, cam_im)

        # # Late fusion of the 3D detections with the 2D detections.
        # box3d_lidar, label_preds, scores = fusion_filter(cam_im, box2d, box3d_lidar, label_preds, scores,
        #                                                  args.score_thr, args.iou_thr)
        #
        # box_im = show_image_with_boxes(cam_im, box3d_lidar, scores, calib)

        # cv2.imshow('bev', bev_im)
        # cv2.imshow('cam', box_im)
        # cv2.waitKey(0)

        # publish_img(pub_bev, cam_im)
        # publish_img(pub_cam, box_im)
        # publish_img(pub_infrared, infrared_im)
        publish_pcl(pub_pc, pc_velo[:, :3])
        # publish_pcl(pub_radar, radar_velo[:, :3])
        # publish_boxes reads the global ``scores`` set above for filtering.
        publish_boxes(pub_arr_bbox, box3d_lidar)
        # Blocks until the user presses Enter; 'q' + Enter stops the replay.
        key = input("Press Enter to continue...")
        if key == 'q':
            break
        # rate.sleep()
    # file_3d.close()

    # Keep re-publishing the last processed frame until the node is shut down,
    # so RViz subscribers that connect late still receive data.
    while not rospy.is_shutdown():
        # publish_img(pub_bev, cam_im)
        # publish_img(pub_cam, box_im)
        # publish_img(pub_infrared, infrared_im)
        publish_pcl(pub_pc, pc_velo[:, :3])
        # publish_pcl(pub_radar, radar_velo[:, :3])
        publish_boxes(pub_arr_bbox, box3d_lidar)
        rate.sleep()
