#include "../include/include.h"
#include "../include/Visualizer.h"
#include "time.h"
//#include "pcl/io/pcd_io.h"
//#include "pcl/visualization/pcl_visualizer.h"
using namespace std;
using namespace cv;
using namespace rs2;
//using namespace mlpack;

// YOLO network input geometry: frames are resized to 416x416 before inference.
const size_t inWidth = 416;
const size_t inHeight = 416;
const float WHRatio = inWidth / (float)inHeight;
// Pixel scaling for blobFromImage: maps [0,255] bytes to [0,1] floats.
const float inScaleFactor = 1 / 255.f;
const float meanVal = 127.5;
void image_detection_Cfg();
Mat Dectection(Mat color_mat);
// Shared image buffers: depth map, annotated detection output, input color frame.
cv::Mat Depthmate, Dec_mat, color_mat;

std::vector<String> outNames;    // names of the net's unconnected output layers
vector<string> classNamesVec;    // class labels loaded from coco.names

cv::dnn::Net net; //DNN net
// Darknet model files (weights / network config / class-name list).
const String yolo_tiny_model = "../engine/enetb0-coco_final.weights";
const String yolo_tiny_cfg = "../engine/enet-coco.cfg";
const String classname_path = "../engine/coco.names";

vector<Objection> ObjectionOfOneMat; // detected objects in one frame

// Camera parameters
Eigen::Matrix<float, 3, 3> MTR;                                                   // rotation matrix, depth camera -> color camera
Eigen::Vector3f V_T;                                                              // translation vector T
Eigen::Matrix<float, 3, 3> Inner_Transformation_Depth, InnerTransformation_Color; // camera intrinsic matrices

/// Enumerate connected RealSense devices, start a 1280x720 @ 15 fps
/// depth + color pipeline on `pipe`, and cache the camera parameters into
/// the file-scope globals (Inner_Transformation_Depth,
/// InnerTransformation_Color, MTR, V_T).
/// @param pipe  pipeline to start; remains running on success.
/// @return EXIT_SUCCESS on success, EXIT_FAILURE when no device is connected.
int camera_config(rs2::pipeline &pipe)
{
    rs2::log_to_console(RS2_LOG_SEVERITY_WARN);

    /// Create librealsense context for managing devices
    rs2::context ctx;
    // Query once and reuse; the original indexed [0] without checking that
    // any device exists, which throws/crashes with no camera attached.
    auto devices = ctx.query_devices();
    const int device_num = devices.size();
    if (device_num == 0)
    {
        std::cerr << "camera_config: no RealSense device connected" << std::endl;
        return EXIT_FAILURE;
    }
    std::string serial_number(devices[0].get_info(RS2_CAMERA_INFO_SERIAL_NUMBER));

    std::cout << "device_num : " << device_num << " serial_number num: " << serial_number << std::endl;

    // Configure the data pipeline: 16-bit depth and BGR8 color streams.
    rs2::config pipe_config;
    pipe_config.enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Z16, 15);
    pipe_config.enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_BGR8, 15);

    // start() returns the profile of the running pipeline.
    rs2::pipeline_profile profile = pipe.start(pipe_config);

    // Fetch the per-stream profiles.
    auto dprofile = profile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
    auto cprofile = profile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();

    // Depth-camera intrinsics -> 3x3 K matrix.
    auto intrinDepth = dprofile.get_intrinsics();
    Inner_Transformation_Depth << intrinDepth.fx, 0, intrinDepth.ppx, 0, intrinDepth.fy, intrinDepth.ppy, 0, 0, 1;

    // Color-camera intrinsics -> 3x3 K matrix.
    auto intrinColor = cprofile.get_intrinsics();
    InnerTransformation_Color << intrinColor.fx, 0, intrinColor.ppx, 0, intrinColor.fy, intrinColor.ppy, 0, 0, 1;

    // Rigid transform (rotation + translation) from the depth-camera frame
    // to the color-camera frame, straight from the device calibration.
    auto extrinDepth2Color = dprofile.get_extrinsics_to(cprofile);
    MTR << extrinDepth2Color.rotation[0], extrinDepth2Color.rotation[1], extrinDepth2Color.rotation[2], extrinDepth2Color.rotation[3], extrinDepth2Color.rotation[4], extrinDepth2Color.rotation[5], extrinDepth2Color.rotation[6], extrinDepth2Color.rotation[7], extrinDepth2Color.rotation[8];
    V_T << extrinDepth2Color.translation[0], extrinDepth2Color.translation[1], extrinDepth2Color.translation[2];

    return EXIT_SUCCESS;
}

/// Render a 16-bit depth image as a JET-colormapped 8-bit image for display.
/// @param src  input depth Mat (any single-channel type minMaxIdx accepts).
/// @param dst  output CV_8UC3 colormapped image (may alias src).
void depth_rendering(cv::Mat &src, cv::Mat &dst)
{
    double min = 0.0;
    double max = 0.0;
    // BUG FIX: minMaxIdx requires minIdx/maxIdx to point at arrays of
    // src.dims ints; the original passed addresses of single ints (UB for a
    // 2-D Mat). The locations are unused, so pass nullptr.
    cv::minMaxIdx(src, &min, &max, nullptr, nullptr);

    std::cout << "min :" << min << "max :" << max << std::endl;

    // Linearly stretch [min, max] onto [0, 255]; guard against a flat image
    // (max == min) which would otherwise divide by zero.
    const double range = max - min;
    const double scale = range > 0.0 ? 255.0 / range : 1.0;
    cv::Mat adjMap;
    src.convertTo(adjMap, CV_8UC1, scale, -min * scale);

    // Tone-map the grayscale depth into a color image that is much more
    // pleasing for the eye (COLORMAP_JET).
    applyColorMap(adjMap, dst, cv::COLORMAP_JET);
}

void image_detection_config(void)
{
    net = cv::dnn::readNetFromDarknet(yolo_tiny_cfg, yolo_tiny_model);
    net.setPreferableBackend(cv::dnn::DNN_TARGET_CPU);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    outNames = net.getUnconnectedOutLayersNames();
    for (int i = 0; i < outNames.size(); i++) {
        printf("output layer name : %s\n", outNames[i].c_str());
    }

    ifstream classNamesFile(classname_path);
    if (classNamesFile.is_open())
    {
        string className = "";
        while (std::getline(classNamesFile, className))
            classNamesVec.push_back(className);
    }
}

int main(int argc, char **argv)
{
    //camera
    rs2::pipeline m_pipes; ///生成Realsense管道，用来封装实际的相机设备
    rs2::colorizer color_map;

    camera_config(m_pipes); //相机初始化

    // Declare align
    rs2::align align_to_color(RS2_STREAM_COLOR);

    // Declare filters
    rs2::hole_filling_filter Hole_Filling_filter(1); //孔填充滤波器

    image_detection_config();//二维目标检测Yolov3初始化

    dlib::perspective_window Win_3D; //3D显示窗口
    Win_3D.set_title("ALL Objection 3D Point Cloud");

    while (waitKey(1) != 27)
    {
        //堵塞程序直到新的一帧捕获
        rs2::frameset frameset = m_pipes.wait_for_frames();
        frameset = align_to_color.process(frameset);

        //取深度图和彩色图
        rs2::frame color_frame = frameset.get_color_frame();
        rs2::frame depth_frame = frameset.get_depth_frame();

        //获取宽高
        const int depth_w = depth_frame.as<rs2::depth_frame>().get_width();
        const int depth_h = depth_frame.as<rs2::depth_frame>().get_height();
        const int color_w = color_frame.as<rs2::video_frame>().get_width();
        const int color_h = color_frame.as<rs2::video_frame>().get_height();

        //创建OPENCV类型 并传入数据
        Mat depth_image(Size(depth_w, depth_h), CV_16U, (void *)depth_frame.get_data(), Mat::AUTO_STEP);
        Mat color_image(Size(color_w, color_h), CV_8UC3, (void *)color_frame.get_data(), Mat::AUTO_STEP);

        depth_rendering(depth_image, depth_image);
        imshow("color_image", color_image);
        imshow("depth_image", depth_image);

        cv::waitKey(0);

        Win_3D.clear_overlay();    //清除原有点云信息
        ObjectionOfOneMat.clear(); //清空上一幅图像的目标
        Dec_mat = Dectection(color_mat);
        Win_3D.add_overlay(Visualizer{ObjectionOfOneMat}.Report_PCLOneMat()); //画出点云
        Win_3D.show();
/*
        // for (auto objection : ObjectionOfOneMat)
        // {
        //     if (objection.Enable)
        //     {
        //         cout << objection.Classname << ": ";
        //         for (auto i : objection.Point_Camera)
        //             cout << i << " ";
        //         cout << endl;
        //     }
        // }
        imshow("Dec_mat", Dec_mat);
        imshow("Depthmate", Depthmate);
*/
        if (cv::waitKey(1) == 32)
        {                               //空格
            Win_3D.wait_until_closed(); //暂停
        }
    }
}
/// Run the YOLO network on `color_mat`, draw the FPS estimate, class labels
/// and boxes onto it, append each surviving detection to the global
/// ObjectionOfOneMat, and return the annotated image.
/// @param color_mat  BGR input frame (annotated in place; Mat header shares data).
/// @return the annotated frame.
Mat Dectection(Mat color_mat)
{
    // Blob: scale bytes to [0,1], resize to 416x416, swap BGR->RGB, no crop.
    Mat inputBlob = cv::dnn::blobFromImage(color_mat, inScaleFactor, Size(inWidth, inHeight), Scalar(), true, false);
    net.setInput(inputBlob);

    // Forward pass over every unconnected output layer.
    std::vector<Mat> outs;
    net.forward(outs, outNames);

    // Profiler-based FPS estimate drawn onto the frame.
    vector<double> layersTimings;
    double freq = getTickFrequency() / 1000;
    double time = net.getPerfProfile(layersTimings) / freq;
    // Guard against division by zero on a degenerate/empty profile.
    double FPS = time > 0 ? 1000 / time : 0;
    ostringstream ss;
    ss << "FPS: " << FPS;
    putText(color_mat, ss.str(), Point(0, 10), 0, 0.5, Scalar(255, 0, 0));

    // Decode raw YOLO rows: [cx, cy, w, h, objectness, class scores...],
    // coordinates normalized to [0,1].
    vector<Rect> boxes;
    vector<int> classIds;
    vector<float> confidences;
    for (size_t i = 0; i < outs.size(); ++i)
    {
        float *data = (float *)outs[i].data;
        for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
        {
            Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
            Point classIdPoint;
            double confidence;
            minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
            if (confidence > 0.5)
            {
                int centerX = (int)(data[0] * color_mat.cols);
                int centerY = (int)(data[1] * color_mat.rows);
                int width = (int)(data[2] * color_mat.cols);
                int height = (int)(data[3] * color_mat.rows);
                int left = centerX - width / 2;
                int top = centerY - height / 2;
                // BUG FIX: clamp the box to the image so negative/overflowing
                // coordinates never reach drawing or downstream depth lookups.
                Rect box = Rect(left, top, width, height) & Rect(0, 0, color_mat.cols, color_mat.rows);
                if (box.area() <= 0)
                    continue;
                classIds.push_back(classIdPoint.x);
                confidences.push_back((float)confidence);
                boxes.push_back(box);
            }
        }
    }

    // Non-maximum suppression: confidence threshold 0.5, NMS threshold 0.2.
    vector<int> indices;
    cv::dnn::NMSBoxes(boxes, confidences, 0.5, 0.2, indices);
    for (size_t i = 0; i < indices.size(); ++i)
    {
        int idx = indices[i];
        Rect box = boxes[idx];
        auto ClassID = classIds[idx];
        // BUG FIX: guard against a class id beyond the loaded name list
        // (e.g. when coco.names failed to load or is shorter than expected).
        String className = ClassID < (int)classNamesVec.size() ? classNamesVec[ClassID] : cv::format("id_%d", ClassID);
        Objection NewObjection(box, ClassID);
        ObjectionOfOneMat.push_back(NewObjection);
        putText(color_mat, className.c_str(), box.tl(), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 0, 0), 2, 8);
        rectangle(color_mat, box, Scalar(0, 0, 255), 2, 8, 0);
    }
    return color_mat;
}
