#include "fusionv2.h"

using namespace std;


LidarCamFusion::LidarCamFusion(ros::NodeHandle &nh)
{   
    // Node setup: read topic names, build the KITTI projection matrix, load
    // the darknet YOLOv3 network, wire up the synchronized camera/LiDAR
    // subscribers, and then block in spin() until node shutdown.

    // Pre-allocated slots for projected marker centres; the (commented-out)
    // KITTI code path in fusionImgMarkerCallback indexes these by box id,
    // while the live path push_backs onto the end.
    marker_center = vector<cv::Point2d>(20);
    // cam_topic_ = "/kitti/camera_color_left/image_raw";
    // lidar_topic_ = "/kitti/velo/pointcloud";

    // cam_topic_ = "/image_raw";
    // lidar_topic_ = "velodyne_points";

    // marker_topic_ = "markersArray";
    
    // Topic names come from the parameter server (private namespace in main).
    nh.getParam("cam_topic", cam_topic_);
    nh.getParam("lidar_topic", lidar_topic_);
    nh.getParam("marker_topic", marker_topic_);

    ROS_INFO("cam_topic : %s",cam_topic_.c_str());
    ROS_INFO("lidar_topic : %s",lidar_topic_.c_str());
    ROS_INFO("marker_topic : %s",marker_topic_.c_str());

    // Camera projection (P_rect_00), rectification (R_rect_00) and
    // LiDAR->camera extrinsic (RT) matrices. The hard-coded values look like
    // the KITTI raw-dataset calibration — TODO confirm for other sensor rigs.
    P_rect_00 = cv::Mat(3,4,cv::DataType<double>::type);
    R_rect_00 = cv::Mat(4,4,cv::DataType<double>::type);
    RT = cv::Mat(4,4,cv::DataType<double>::type);

    RT.at<double>(0,0) = 7.533745e-03; RT.at<double>(0,1) = -9.999714e-01; RT.at<double>(0,2) = -6.166020e-04; RT.at<double>(0,3) = -4.069766e-03;
    RT.at<double>(1,0) = 1.480249e-02; RT.at<double>(1,1) = 7.280733e-04; RT.at<double>(1,2) = -9.998902e-01; RT.at<double>(1,3) = -7.631618e-02;
    RT.at<double>(2,0) = 9.998621e-01; RT.at<double>(2,1) = 7.523790e-03; RT.at<double>(2,2) = 1.480755e-02; RT.at<double>(2,3) = -2.717806e-01;
    RT.at<double>(3,0) = 0.0; RT.at<double>(3,1) = 0.0; RT.at<double>(3,2) = 0.0; RT.at<double>(3,3) = 1.0;
    
    R_rect_00.at<double>(0,0) = 9.999239e-01; R_rect_00.at<double>(0,1) = 9.837760e-03; R_rect_00.at<double>(0,2) = -7.445048e-03; R_rect_00.at<double>(0,3) = 0.0;
    R_rect_00.at<double>(1,0) = -9.869795e-03; R_rect_00.at<double>(1,1) = 9.999421e-01; R_rect_00.at<double>(1,2) = -4.278459e-03; R_rect_00.at<double>(1,3) = 0.0;
    R_rect_00.at<double>(2,0) = 7.402527e-03; R_rect_00.at<double>(2,1) = 4.351614e-03; R_rect_00.at<double>(2,2) = 9.999631e-01; R_rect_00.at<double>(2,3) = 0.0;
    R_rect_00.at<double>(3,0) = 0; R_rect_00.at<double>(3,1) = 0; R_rect_00.at<double>(3,2) = 0; R_rect_00.at<double>(3,3) = 1;

    P_rect_00.at<double>(0,0) = 7.215377e+02; P_rect_00.at<double>(0,1) = 0.000000e+00; P_rect_00.at<double>(0,2) = 6.095593e+02; P_rect_00.at<double>(0,3) = 0.000000e+00;
    P_rect_00.at<double>(1,0) = 0.000000e+00; P_rect_00.at<double>(1,1) = 7.215377e+02; P_rect_00.at<double>(1,2) = 1.728540e+02; P_rect_00.at<double>(1,3) = 0.000000e+00;
    P_rect_00.at<double>(2,0) = 0.000000e+00; P_rect_00.at<double>(2,1) = 0.000000e+00; P_rect_00.at<double>(2,2) = 1.000000e+00; P_rect_00.at<double>(2,3) = 0.000000e+00; 

    // Combined LiDAR->pixel projection. NOTE(review): PRT is first allocated
    // as 3x1 but immediately reassigned to the 3x4 product; the initial
    // allocation is discarded.
    PRT = cv::Mat(3,1,cv::DataType<double>::type);
    PRT = P_rect_00 * R_rect_00 * RT;

    // Load the YOLO class-name list (one class name per line).
    ifstream classNamesFile(yoloClassesFile);
    if(classNamesFile.is_open())
    {
        string className = "";
        while(getline(classNamesFile, className))
            classNamesVec.push_back(className);
    }

    // Load the darknet network once; it is freed in the destructor.
    net = load_network((char*)yoloModelConfiguration.c_str(), (char*)yoloModelWeights.c_str(), 0 );
    set_batch_network(net, 1);

    image_transport::ImageTransport it(nh);
    pub_imgdetec = it.advertise("camDetec",10);


    cam_sub_.subscribe(nh, cam_topic_, 10);
    lidar_sub_.subscribe(nh, lidar_topic_, 10);
    marker_sub_.subscribe(nh,marker_topic_,10);

    pub_finalMarker = nh.advertise<visualization_msgs::MarkerArray>("final_markers", 10);

    // (1) Fuse the raw LiDAR point cloud with the image.
    // NOTE(review): `sync` is a local shared_ptr; it only survives because
    // spinner.spin() below blocks until shutdown. If the blocking spin is
    // ever removed from this constructor, `sync` must become a class member.
    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::PointCloud2> cam_lidar_fuse_policy;
    typedef message_filters::Synchronizer<cam_lidar_fuse_policy> Sync;
    boost::shared_ptr<Sync> sync;
    sync.reset(new Sync(cam_lidar_fuse_policy(10), cam_sub_, lidar_sub_));
    sync->registerCallback(boost::bind(&LidarCamFusion::fusionCallback, this, _1, _2));

    // (2) Alternative mode: fuse LiDAR obstacle markers with the image.
    // typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, lidarmarkers_msg::lidarMarkers> cam_lidar_fuse_policy;
    // typedef message_filters::Synchronizer<cam_lidar_fuse_policy> Sync;
    // boost::shared_ptr<Sync> sync;
    // sync.reset(new Sync(cam_lidar_fuse_policy(10), cam_sub_, marker_sub_));
    // sync->registerCallback(boost::bind(&LidarCamFusion::fusionImgMarkerCallback, this, _1, _2));
   
    
    ROS_INFO("[3]+LIDAR camera fusion node start \n");

    // Blocks here: callbacks run on 8 threads until ros::shutdown().
    ros::MultiThreadedSpinner spinner(8); // 8 worker threads
    spinner.spin();
}
// Release the darknet network allocated by load_network() in the constructor.
LidarCamFusion::~LidarCamFusion(){
    free_network(net);
}

// OpenCV's built-in dnn module is too slow for real time here; building darknet
// with CUDA/OpenCV/GPU support (used in yolov3Detec) reaches real-time speed.
// Detect objects in `img` with OpenCV-dnn + a darknet YOLO model, keeping only
// class id 2 detections (index 2 of the class file) as BoundingBox entries in
// `bBoxes`; optionally draws and shows the result when bVis is true.
// NOTE(review): the class file is re-read and the network re-loaded on EVERY
// call, which is expensive — this is why the darknet-based yolov3Detec() is
// used instead for the real-time path.
void LidarCamFusion::detectObjectsCV(cv::Mat& img, std::vector<BoundingBox>& bBoxes, float confThreshold, float nmsThreshold, 
                std::string classesFile, std::string modelConfiguration, std::string modelWeights, bool bVis)
{
    // load class names from file
    vector<string> classes;
    ifstream ifs(classesFile.c_str());
    string line;
    while (getline(ifs, line)) classes.push_back(line);
    
    // Load the darknet model through OpenCV's dnn module.
    cv::dnn::Net net = cv::dnn::readNetFromDarknet(modelConfiguration, modelWeights);
    
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
    // net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    // net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
    
    // Build the 4D input blob (NCHW), scaled to [0,1] and resized to the
    // network input size; smaller sizes trade accuracy for speed.
    cv::Mat blob;
    vector<cv::Mat> netOutput;
    double scalefactor = 1/255.0;
    // cv::Size size = cv::Size(416, 416);
    // cv::Size size = cv::Size(1024, 1024);
    // cv::Size size = cv::Size(608, 608);
    cv::Size size = cv::Size(320, 320);
    // cv::Size size = cv::Size(160, 160);
    cv::Scalar mean = cv::Scalar(0,0,0);
    bool swapRB = true;  // BGR (OpenCV) -> RGB (darknet)
    bool crop = false;
    
    blob = cv::dnn::blobFromImage(img,  scalefactor, size, mean, swapRB, crop);
    // cout << blob.flags << "*" << blob.isContinuous() << endl;
    
    // Collect the names of the unconnected output layers; layer ids returned
    // by getUnconnectedOutLayers are 1-based, hence the -1 when indexing.
    vector<cv::String> names;
    vector<int> outLayers = net.getUnconnectedOutLayers();
    vector<cv::String> layersNames = net.getLayerNames();
    
    names.resize(outLayers.size());
    for (size_t i = 0; i < outLayers.size(); ++i){
        names[i] = layersNames[outLayers[i] - 1];
    }

    // Forward pass, timed to stdout in milliseconds.
    double t = (double)cv::getTickCount();
    net.setInput(blob);
    net.forward(netOutput, names);
    t = ((double)cv::getTickCount() - t) / cv::getTickFrequency();
    cout <<  1000 * t / 1.0 << " ms" << endl;

    
    // Scan through all bounding boxes and keep only the ones with high confidence
    // Each output row is [cx, cy, w, h, objectness, class scores...],
    // normalized to the image size.
    vector<int> classIds; vector<float> confidences; vector<cv::Rect> boxes;
    for (size_t i = 0; i < netOutput.size(); ++i)
    {
        float* data = (float*)netOutput[i].data;
        for (int j = 0; j < netOutput[i].rows; ++j, data += netOutput[i].cols)
        {
            cv::Mat scores = netOutput[i].row(j).colRange(5, netOutput[i].cols);
            cv::Point classId;
            double confidence;
            
            // Get the value and location of the maximum score
            cv::minMaxLoc(scores, 0, &confidence, 0, &classId);
            if (confidence > confThreshold)
            {
                cv::Rect box; int cx, cy;
                cx = (int)(data[0] * img.cols);
                cy = (int)(data[1] * img.rows);
                box.width = (int)(data[2] * img.cols);
                box.height = (int)(data[3] * img.rows);
                box.x = cx - box.width/2; // left
                box.y = cy - box.height/2; // top
                
                boxes.push_back(box);
                classIds.push_back(classId.x);
                confidences.push_back((float)confidence);
            }
        }
    }
    
    // perform non-maxima suppression
    vector<int> indices;
    cv::dnn::NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);
    for(auto it=indices.begin(); it!=indices.end(); ++it) {
        
        BoundingBox bBox;
        bBox.roi = boxes[*it];
        bBox.classID = classIds[*it];
        bBox.confidence = confidences[*it];
        bBox.boxID = (int)bBoxes.size(); // zero-based unique identifier for this bounding box
        // Keep class 2 only (presumably "car" in the class file — TODO confirm).
        if(bBox.classID == 2){
            bBoxes.push_back(bBox);
        }
    }
    
    // show results
    if(bVis) {
        for(auto it=bBoxes.begin(); it!=bBoxes.end(); ++it) {
            
            // Draw rectangle displaying the bounding box
            int top, left, width, height;
            top = (*it).roi.y;
            left = (*it).roi.x;
            width = (*it).roi.width;
            height = (*it).roi.height;
            cv::rectangle(img, cv::Point(left, top), cv::Point(left+width, top+height),cv::Scalar(0, 255, 0), 2);
            
            string label = cv::format("%.2f", (*it).confidence);
            label = classes[((*it).classID)] + ":" + label;
        
            // Display label at the top of the bounding box
            int baseLine;
            cv::Size labelSize = getTextSize(label, cv::FONT_ITALIC, 0.5, 1, &baseLine);
            top = max(top, labelSize.height);
            rectangle(img, cv::Point(left, top - round(1.5*labelSize.height)), cv::Point(left + round(1.5*labelSize.width), top + baseLine), cv::Scalar(255, 255, 255), cv::FILLED);
            cv::putText(img, label, cv::Point(left, top), cv::FONT_ITALIC, 0.75, cv::Scalar(0,0,0),1);            
        }        
        string windowName = "车辆检测";
        cv::namedWindow( windowName, 3 );
        cv::imshow( windowName, img );
        cv::waitKey(1);
    }   
}
/* 
void LidarCamFusion::lidarCluster(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud)
{
    std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters;

    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>);
    tree->setInputCloud(cloud);

    std::vector<pcl::PointIndices> clusterIndices;
    pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
    ec.setClusterTolerance(0.52);
    ec.setMinClusterSize(8);
    ec.setMaxClusterSize(200);
    ec.setSearchMethod(tree);
    ec.setInputCloud(cloud);
    ec.extract(clusterIndices);

    pcl::PointXYZ minPt;
    pcl::PointXYZ maxPt;

    for(pcl::PointIndices getIndices: clusterIndices){
        pcl::PointCloud<pcl::PointXYZ>::Ptr cloudCluster(new pcl::PointCloud<pcl::PointXYZ>);
        for(int index : getIndices.indices)
            cloudCluster->points.push_back (cloud->points[index]);

        pcl::getMinMax3D(*cloudCluster,minPt,maxPt);
        float length = maxPt.x - minPt.x;
        float width = maxPt.y - minPt.y;
        float highth = maxPt.z - minPt.z;
        
        if(length < 5 && width < 5 && length >= 0.8 && width >= 0.8 && highth >= 0.5){

            // DetectionCore::reSampleCloud(cloudCluster, 0.1);
            cloudCluster->width = cloudCluster->points.size();
            cloudCluster->height = 1;
            cloudCluster->is_dense = true;
            clusters.push_back(cloudCluster);
        }


    }
}
*/
// Project every LiDAR point into the image through the precomputed KITTI
// projection matrix PRT (3x4) and draw it, colour-coded by forward distance
// (near = red, far = green). Shows the result in a window.
void LidarCamFusion::showLidarImgOverlay(cv::Mat &img, pcl::PointCloud<pcl::PointXYZ>::Ptr lidarPoints)
{
    // Find the maximum forward distance (x) for colour normalisation.
    double maxVal = 0.0; 
    for (size_t i = 0; i < lidarPoints->points.size(); ++i){
        maxVal = maxVal < lidarPoints->points[i].x ? lidarPoints->points[i].x : maxVal;
    }
    // Guard: if no point lies ahead of the sensor, maxVal stays 0 and the
    // colour computation below would divide by zero.
    if (maxVal <= 0.0){
        maxVal = 1.0;
    }

    cv::Mat X(4,1,cv::DataType<double>::type); // homogeneous LiDAR point
    cv::Mat Y(3,1,cv::DataType<double>::type); // projected homogeneous pixel
    for(size_t i = 0; i < lidarPoints->points.size(); ++i) {
        X.at<double>(0, 0) = lidarPoints->points[i].x;
        X.at<double>(1, 0) = lidarPoints->points[i].y;
        X.at<double>(2, 0) = lidarPoints->points[i].z;
        X.at<double>(3, 0) = 1;
        
        Y = PRT * X; // 3x4 projection * 4x1 point -> 3x1 homogeneous pixel

        // BUGFIX: Y is a 3x1 COLUMN vector, so the homogeneous scale is at
        // (row 2, col 0). The old code read at(0,2), which is outside row 0
        // and only yielded the right value because the matrix data happens to
        // be contiguous in memory.
        const double w = Y.at<double>(2, 0);
        cv::Point pt;
        pt.x = Y.at<double>(0, 0) / w;
        pt.y = Y.at<double>(1, 0) / w;

        float val = lidarPoints->points[i].x;

        int red = min(255, (int)(255 * abs((val - maxVal) / maxVal)));
        int green = min(255, (int)(255 * (1 - abs((val - maxVal) / maxVal))));
        cv::circle(img, pt, 10, cv::Scalar(125, green, red), -1);
    }
    
    string windowName = "LiDAR on Image";
    cv::namedWindow( windowName, 5 );
    cv::imshow( windowName, img );
    cv::waitKey(1);
}
// Run darknet YOLOv3 on visImg, keep only class-id-2 ("car" — the same filter
// used elsewhere in this file) detections, refresh the member vectors
// `boxes` / `classNames` / `box_center`, and draw the boxes onto visImg.
void LidarCamFusion::yolov3Detec(cv::Mat& visImg)
{
    cv::Mat rgbImg;
    cvtColor(visImg, rgbImg,COLOR_BGR2RGB); // darknet expects RGB order

    // RAII buffers instead of malloc/free: no leak if anything below throws.
    std::vector<float> srcImg(static_cast<size_t>(rgbImg.rows) * rgbImg.cols * 3);
    imgConvert(rgbImg, srcImg.data());

    std::vector<float> resizeImg(static_cast<size_t>(net->w) * net->h * 3);
    imgResize(srcImg.data(), resizeImg.data(), visImg.cols, visImg.rows, net->w, net->h);  

    network_predict(net, resizeImg.data());
    
    int nboxes=0;
    
    float nms = 0.35;
    detection *dets=get_network_boxes(net, rgbImg.cols, rgbImg.rows, thresh, 0.5, 0,1, &nboxes);

    if(nms)
    {
        do_nms_sort(dets,nboxes,classes,nms);
    }

    boxes.clear();
    classNames.clear();
    // BUGFIX: box_center was never cleared, so it grew without bound across
    // frames and stale centres from old frames were kept forever.
    box_center.clear();

    for(int i=0; i<nboxes; i++)
    {   
        // BUGFIX: the original mutated the member `thresh` here, so the
        // detection threshold passed to get_network_boxes drifted upward from
        // frame to frame. Use a local best-probability accumulator instead.
        float bestProb = 0.5f;
        bool flag = false;
        int className = -1;
        for(int j = 0; j < classes; j++)
        {
            if(dets[i].prob[j] > bestProb)
            {
                bestProb = dets[i].prob[j];
                flag = true;
                className = j;
            }
        }
        if(flag && className == 2) // keep cars only
        {
            int left = (dets[i].bbox.x - dets[i].bbox.w/2.)*visImg.cols;
            int right = (dets[i].bbox.x + dets[i].bbox.w/2.)*visImg.cols;
            int top = (dets[i].bbox.y - dets[i].bbox.h/2.)*visImg.rows;
            int bot = (dets[i].bbox.y + dets[i].bbox.h/2.)*visImg.rows;

            // Clamp the box to the image bounds.
            if(left < 0)                left = 0;
            if(right > visImg.cols -1)   right = visImg.cols - 1;
            if(top < 0)                 top = 0;
            if(bot > visImg.rows-1)      bot = visImg.rows-1;

            Rect box(left, top, fabs(left-right),fabs(bot-top));
            boxes.push_back(box);
            classNames.push_back(className);
        }
    }

    boxes_updated = true;

    free_detections(dets,nboxes); 
    
    // Draw the kept boxes (darknet colour palette) and record their centres.
    for(unsigned int i=0; i < boxes.size(); i++)
    {
        int offset = classNames[i] * 123457 % 80;
        float red   = 255*get_color(2,offset,80);
        float green = 255*get_color(1,offset,80);
        float blue  = 255*get_color(0,offset,80);

        rectangle(visImg,boxes[i],Scalar(blue,green,red),2);                            

        
        String label = String(classNamesVec[classNames[i]]);
        int baseLine = 0;
        Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5,1, &baseLine);
        // putText(visImg,label, Point(boxes[i].x,boxes[i].y+labelSize.height),FONT_HERSHEY_SIMPLEX, 1,Scalar(red,blue,green),2);
        
        cv::Point2d temp_box(boxes[i].x+boxes[i].width/2.0, boxes[i].y+boxes[i].height/2.0);
        box_center.push_back(temp_box);
    }
}
// Synchronized camera + LiDAR callback: run YOLO on the frame, publish the
// annotated image, then project the range-filtered point cloud into the image
// and highlight every projected point that falls inside a detected car box.
void LidarCamFusion::fusionCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::PointCloud2ConstPtr& cloud)
{

    // cout << "image_time: " << image->header.stamp << "\n"  << "lidar_time: " << cloud->header.stamp << "\n"  << endl;

    /* 1---image process */
    cv_bridge::CvImagePtr bridgePtr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::BGR8);
    cv::Mat rawFrame = bridgePtr->image;
    cv::Mat visImg = rawFrame.clone();

    // show raw image
    std::string windowName = "Image Raw";
    cv::namedWindow( windowName, 1 );
    cv::imshow( windowName, visImg );
    cv::waitKey(1);
    
    // yoloV3 detection (fills the member `boxes` with car bounding boxes)
    yolov3Detec(visImg);

    // show detec image
    std::string windowName2 = "Car Detec";
    cv::namedWindow( windowName2, 2 );
    cv::imshow( windowName2, visImg );
    cv::waitKey(2);
    
    sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", visImg).toImageMsg();
    pub_imgdetec.publish(msg);


    /* 2---lidar process */ 
    // Convert the ROS cloud and keep only points in front of the sensor
    // (0 < x < 100 m).
    pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromROSMsg(*cloud, *pcl_cloud);
    pcl::PassThrough<pcl::PointXYZ> rangeFilter;
    rangeFilter.setInputCloud (pcl_cloud);
    rangeFilter.setFilterFieldName ("x");
    rangeFilter.setFilterLimits (0.0, 100);
    rangeFilter.filter (*pcl_cloud);


    /* Project the robot's LiDAR points into the camera image. */
    vector<cv::Point3f> msg_;
    for (const auto& p : pcl_cloud->points){
        msg_.push_back(cv::Point3f(p.x, p.y, p.z));
    }
    cv::Size imageSize;
    imageSize.width = 640;
    imageSize.height = 480;
    vector<cv::Point2d> lidarPoints2d = pointcloud2_to_image(msg_, imageSize);

    // Mark every projected point that lands inside a detected bounding box.
    for (const auto& uv : lidarPoints2d){
        // cout << "x = " << uv.x << endl;
        for (const auto& carBox : boxes){
            if (carBox.contains(uv)){
                cv::circle(visImg, uv, 3, cv::Scalar(255, 255, 0), -1);
            }
        }
    }
    string windowName6 = "Real LiDAR on Image";
    cv::namedWindow( windowName6, 5 );
    cv::imshow( windowName6, visImg );
    cv::waitKey(1);
    

    // cout << pcl_cloud->points.size() << endl;

    // pcl::PointCloud<pcl::PointXYZ>::Ptr cluster_cloud(new pcl::PointCloud<pcl::PointXYZ>);
    // pcl::PointCloud<pcl::PointXYZ>::Ptr centerPoints(new pcl::PointCloud<pcl::PointXYZ>);
    // lidarCluster(pcl_cloud, cluster_cloud, centerPoints);
        
  
    // showLidarImgOverlay(rawFrame, pcl_cloud); // KITTI clouds have 60-70k points with x>0, so live projection is slow

    // TODO: PCL's and OpenCV's FLANN libraries conflict — using PCL's kd-tree
    //       search while both are linked breaks the build. Once that is
    //       resolved, cluster the cloud directly in this callback and match
    //       the clusters against the image boxes. Until then, we subscribe to
    //       the marker topic published by the obstacle package instead.
}
// Synchronized camera + LiDAR-obstacle-marker callback: run YOLO on the frame,
// project each obstacle marker's representative point (points[25]) into the
// image, and republish only the markers whose projection falls inside a
// detected car bounding box (close enough to the box centre).
void LidarCamFusion::fusionImgMarkerCallback(const sensor_msgs::ImageConstPtr& image, const lidarmarkers_msg::lidarMarkersConstPtr& lidarMarker)
{
    /* 1---image process */
    cv_bridge::CvImagePtr cv_ptr;
    cv_ptr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::BGR8);
    cv::Mat cvimg = cv_ptr->image;
    cv::Mat visImg; 
    visImg = cvimg.clone();

    // show raw image
    // std::string windowName = "Image Raw";
    // cv::namedWindow( windowName, 1 );
    // cv::imshow( windowName, visImg );
    // cv::waitKey(1);
    
    // yoloV3 detection (fills the member `boxes` with car bounding boxes)
    yolov3Detec(visImg);

    // show detec image
    // std::string windowName2 = "Car Detec";
    // cv::namedWindow( windowName2, 2 );
    // cv::imshow( windowName2, visImg );
    // cv::waitKey(2);

    /* 2--project the robot's LiDAR obstacle markers into the image */
    /* TODO: the LiDAR is mounted low and the 16-beam cloud is sparse, so few
       points land on vehicles; image detection works well but the fusion
       quality suffers. */
    
    vector<cv::Point3f> msg_;
    // BUGFIX: remember which original marker each projected point came from.
    // The old code indexed markers[ i-of-projected-point ], but msg_ only
    // contains the markers that passed the x>0 filter, so the indices could
    // disagree and the wrong marker was republished.
    vector<size_t> markerIdx;
    for(size_t i = 0; i < lidarMarker->markerArray.markers.size(); ++i) 
    {
        const visualization_msgs::Marker& mk = lidarMarker->markerArray.markers[i];
        // BUGFIX: guard the points[25] access so markers with fewer than 26
        // points cannot cause an out-of-bounds read.
        if(mk.points.size() > 25 && mk.points[25].x > 0)
        {
            msg_.push_back(cv::Point3f(mk.points[25].x, mk.points[25].y, mk.points[25].z));
            markerIdx.push_back(i);
        }
    }
    vector<cv::Point2d> lidarPoints2d;
    cv::Size imageSize;
    imageSize.width = 640;
    imageSize.height = 480;
    lidarPoints2d = pointcloud2_to_image(msg_, imageSize);

    visualization_msgs::MarkerArray fusionMarkerArray;
    for (vector<cv::Rect>::iterator it = boxes.begin(); it != boxes.end(); ++it){
        // Accept a projected point only if it lies inside the box AND within
        // 3/4 of the squared half-width of the box centre.
        float distanceThres = pow(it->width/2, 2) * 3 / 4;
        float tempdis = 0;

        for (size_t i = 0; i < lidarPoints2d.size(); ++i){
            cv::Point2d pt(lidarPoints2d[i].x, lidarPoints2d[i].y);
            tempdis = pow((it->x + it->width/2 - pt.x), 2) + pow((it->y + it->height/2 - pt.y), 2);
            if (it->contains(pt) && tempdis < distanceThres)
            {   
                // NOTE(review): marker_center grows every frame; the KITTI
                // code path below indexes it instead — consider unifying.
                marker_center.push_back(pt);
                cv::circle(visImg, pt, 10, cv::Scalar(255, 255, 0), -1);  
                visualization_msgs::Marker finalMaker;
                finalMaker = lidarMarker->markerArray.markers[markerIdx[i]];
                finalMaker.header.frame_id = lidarMarker->header.frame_id;
                finalMaker.header.stamp = image->header.stamp;
                // correctMarker(finalMaker);
                fusionMarkerArray.markers.push_back(finalMaker);                  
            } 
        }
    }
    


    /* 2---marker center process (KITTI database)*/
    /* 
    cv::Mat X(4,1,cv::DataType<double>::type);
    cv::Mat Y(3,1,cv::DataType<double>::type);
    
    visualization_msgs::MarkerArray fusionMarkerArray;
    int j = 0;
    for (vector<cv::Rect>::iterator it = boxes.begin(); it != boxes.end(); ++it){
        int flag = 0;
        bool imgHasPoint = false;
        float distanceThres = pow(it->width/2, 2) * 3 / 4;
        float tempdis = 0;
        
        for(size_t i = 0; i < lidarMarker->markerArray.markers.size(); ++i) 
        {
            if(lidarMarker->markerArray.markers[i].points[25].x > 0)
            {
                X.at<double>(0, 0) = lidarMarker->markerArray.markers[i].points[25].x;
                X.at<double>(1, 0) = lidarMarker->markerArray.markers[i].points[25].y;
                X.at<double>(2, 0) = lidarMarker->markerArray.markers[i].points[25].z;
                X.at<double>(3, 0) = 1;

                Y = PRT * X;

                cv::Point2d pt;
                pt.x = Y.at<double>(0, 0) / Y.at<double>(0, 2);
                pt.y = Y.at<double>(1, 0) / Y.at<double>(0, 2);
                
                tempdis = pow((it->x + it->width/2 - pt.x), 2) + pow((it->y + it->height/2 - pt.y), 2);
                if ( tempdis < distanceThres)  //&&it->contains(pt)
                {   
                    distanceThres = tempdis;
                    marker_center[j] = pt;
                    imgHasPoint = true;
                    flag = i;
                                      
                }                
            }
   
        }
         
        // ros::Time correctTime;
        // correctTime.sec = image->header.stamp.sec - 108 ;
        // correctTime.nsec = image->header.stamp.nsec;
        if (imgHasPoint){
            cv::circle(visImg, marker_center[j], 10, cv::Scalar(255, 255, 0), -1);
            visualization_msgs::Marker finalMaker;
            finalMaker = lidarMarker->markerArray.markers[flag];
            finalMaker.header.frame_id = lidarMarker->header.frame_id;
            finalMaker.header.stamp = image->header.stamp;
            correctMarker(finalMaker);
            fusionMarkerArray.markers.push_back(finalMaker);  
        }
        j++;
    }
    */

    pub_finalMarker.publish(fusionMarkerArray);
    sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", visImg).toImageMsg();
    pub_imgdetec.publish(msg);
    string windowName5 = "LiDAR on Image";
    cv::namedWindow( windowName5, 5 );
    cv::imshow( windowName5, visImg );
    cv::waitKey(1);
}

/* Projection between the real vehicle's 16-beam Velodyne and the camera */
// Project 3-D LiDAR points into camera pixel coordinates using the real
// vehicle's hard-coded intrinsics, plumb-bob distortion coefficients, and
// LiDAR->camera extrinsics (same math as Autoware's points2image).
// Returns one rounded pixel coordinate per input point.
// NOTE(review): imageSize is accepted but never used for clipping — callers
// pass 640x480 and out-of-image points are returned unmodified.
vector<Point2d> LidarCamFusion::pointcloud2_to_image(const vector<Point3f> msg,
                                         const cv::Size& imageSize)
{
    (void)imageSize; // currently unused (see NOTE above)

    vector<Point2d> img_points;
    cv::Mat intrinsic_matrix = cv::Mat::eye(3, 3, CV_32FC1);
    cv::Mat distCoeff = cv::Mat(5, 1, DataType<float>::type);
    cv::Mat cameraExtrinsicMat = cv::Mat(4, 4, DataType<float>::type);

    // Intrinsics: fx, fy, cx, cy (previous calibrations kept in comments).
    intrinsic_matrix.at<float>(0, 0) = 7.1086713154276424e+02;//748.513026;//1.6415318549788924e+003;
    intrinsic_matrix.at<float>(1, 0) = 0;
    intrinsic_matrix.at<float>(2, 0) = 0;
    intrinsic_matrix.at<float>(0, 1) = 0;
    intrinsic_matrix.at<float>(1, 1) = 7.1002928273752718e+02;//746.151191;//1.7067753507885654e+003;
    intrinsic_matrix.at<float>(2, 1) = 0;
    intrinsic_matrix.at<float>(0, 2) = 3.4648360876958566e+02;//344.761595;//5.3262822453148601e+002;
    intrinsic_matrix.at<float>(1, 2) = 2.1356982249423606e+02;//239.344274;//3.8095355839052968e+002;
    intrinsic_matrix.at<float>(2, 2) = 1.0;

    // Distortion: k1, k2, p1, p2, k3 (plumb-bob order).
    distCoeff.at<float>(0) = -1.2009796794520683e-01;// -0.181816;//-7.9134632415085826e-001;
    distCoeff.at<float>(1) = -1.7909877827451678e-01;//0.033556;//1.5623584435644169e+000;
    distCoeff.at<float>(2) = 1.4330751500649312e-03;//-0.007749;//-3.3916502741726508e-002;
    distCoeff.at<float>(3) = -1.6035897606267967e-03;//-0.000288;//-1.3921577146136694e-002;
    distCoeff.at<float>(4) = 2.4196197710843320e-01;//0.0;//1.1430734623697941e-002;

    // Extrinsic transform (camera-from-LiDAR, homogeneous 4x4).
    cameraExtrinsicMat.at<float>(0, 0) = -5.3421013207097412e-02;//-2.2885227522464435e-01;
    cameraExtrinsicMat.at<float>(0, 1) = -2.9226997203065541e-02;//-1.5214255212709749e-01;
    cameraExtrinsicMat.at<float>(0, 2) = 9.9814426711894688e-01;//9.6149845551449376e-01;
    cameraExtrinsicMat.at<float>(0, 3) = 1.2616455554962158e-01;//8.4437644109129906e-03;
    cameraExtrinsicMat.at<float>(1, 0) = -9.9851694441775540e-01;//-9.7303107309702697e-01;
    cameraExtrinsicMat.at<float>(1, 1) = 1.2067176116371425e-02;//6.3942072996323596e-03;
    cameraExtrinsicMat.at<float>(1, 2) = -5.3087615987213732e-02;//-2.3058543948102478e-01;
    cameraExtrinsicMat.at<float>(1, 3) = -1.5259952284395695e-02;//-2.1137388423085213e-02;
    cameraExtrinsicMat.at<float>(2, 0) = -1.0493191056894891e-02;//2.8933836803155533e-02;
    cameraExtrinsicMat.at<float>(2, 1) = -9.9949995792649671e-01;//-9.8833787640930915e-01;
    cameraExtrinsicMat.at<float>(2, 2) = -2.9828292716396088e-02;//-1.4950275964872894e-01;
    cameraExtrinsicMat.at<float>(2, 3) = -5.2367319585755467e-04;//2.1896010637283325e-01;
    cameraExtrinsicMat.at<float>(3, 0) = 0.0;
    cameraExtrinsicMat.at<float>(3, 1) = 0.0;
    cameraExtrinsicMat.at<float>(3, 2) = 0.0;
    cameraExtrinsicMat.at<float>(3, 3) = 1.0;
    
    // Invert the extrinsic transform: rotation part and derived translation.
    // CLEANUP: invTt was previously pre-allocated as a 3x1 CV_64F matrix and
    // then immediately reassigned to this 1x3 CV_32F result — the dead
    // allocation (and misleading type) is removed.
    cv::Mat invRt = cameraExtrinsicMat(cv::Rect(0, 0, 3, 3));
    cv::Mat invTt = (-invRt.t() * cameraExtrinsicMat(cv::Rect(3, 0, 1, 3))).t();

    cv::Mat point(1, 3, CV_64F); // point in camera coordinates
    cv::Point2d imagepoint;

    for(size_t k = 0; k < msg.size(); ++k)
    {
        // Transform into camera coordinates: point = R^T * p + t'
        // (the inner j-loop of the original is unrolled for clarity).
        for (int i = 0; i < 3; i++)
        {
            point.at<double>(i) = invTt.at<float>(i);
            point.at<double>(i) += double(msg[k].x) * invRt.at<float>(0, i);
            point.at<double>(i) += double(msg[k].y) * invRt.at<float>(1, i);
            point.at<double>(i) += double(msg[k].z) * invRt.at<float>(2, i);
        }

        // Perspective divide.
        double tmpx = point.at<double>(0) / point.at<double>(2);
        double tmpy = point.at<double>(1) / point.at<double>(2);

        // Apply radial (k1,k2,k3) and tangential (p1,p2) distortion.
        double r2 = tmpx * tmpx + tmpy * tmpy;
        double tmpdist =
            1 + distCoeff.at<float>(0) * r2 + distCoeff.at<float>(1) * r2 * r2 + distCoeff.at<float>(4) * r2 * r2 * r2;

        imagepoint.x =
            tmpx * tmpdist + 2 * distCoeff.at<float>(2) * tmpx * tmpy + distCoeff.at<float>(3) * (r2 + 2 * tmpx * tmpx);
        imagepoint.y =
            tmpy * tmpdist + distCoeff.at<float>(2) * (r2 + 2 * tmpy * tmpy) + 2 * distCoeff.at<float>(3) * tmpx * tmpy;
        imagepoint.x = intrinsic_matrix.at<float>(0, 0) * imagepoint.x + intrinsic_matrix.at<float>(0, 2);
        imagepoint.y = intrinsic_matrix.at<float>(1, 1) * imagepoint.y + intrinsic_matrix.at<float>(1, 2);

        // Round to the nearest integer pixel.
        int px = int(imagepoint.x + 0.5);
        int py = int(imagepoint.y + 0.5);
        img_points.push_back(Point2d(px, py));
    }
    
    return img_points;
}

// Shift the marker 2.2 m backwards along x — presumably compensating for a
// LiDAR-to-camera mounting offset (TODO confirm against the rig) — and give
// it a short lifetime so stale markers do not linger in RViz.
void LidarCamFusion::correctMarker(visualization_msgs::Marker & marker)
{
    // Range-for replaces the original `int i < points.size()` loop, which
    // mixed signed and unsigned types in the comparison.
    for (auto& pt : marker.points){
        pt.x -= 2.2;
    }
    marker.lifetime = ros::Duration(0.1);
}

// Linearly interpolate channel c of the member palette `colors` at position
// x/max, mapped onto the palette's 5 segments (darknet-style box colouring).
float LidarCamFusion::get_color(int c, int x, int max)          //set the color of lines
{
    float pos = ((float)x / max) * 5;
    const int lo = floor(pos);
    const int hi = ceil(pos);
    pos -= lo;
    // Blend the two neighbouring palette entries.
    return (1 - pos) * colors[lo][c] + pos * colors[hi][c];
}

int main(int argc, char **argv)
{
    // Node entry point. All work happens inside the LidarCamFusion
    // constructor, which subscribes, synchronizes, and then blocks in
    // MultiThreadedSpinner::spin() until shutdown.
    ros::init(argc, argv, "LidarCamFusion");
    ros::NodeHandle nh("~"); // private namespace: params are read as ~cam_topic etc.
    LidarCamFusion core(nh);

}