#include "Kinectv3.h"

#include <time.h>
#include <unistd.h>

#include <chrono>

#include <opencv2/opencv.hpp>
#include <pcl/visualization/cloud_viewer.h>


#define NON_OPTIMIZE 0



hitcrt::Kinectv3 cam("斜搭.mkv"); ///Create the camera object and open the mkv recording at this path

int main()
{
    std::cout << "进入主函数！" << std::endl;
    std::shared_ptr<cv::Mat> depth_image;
    std::shared_ptr<cv::Mat> color_image;
    cv::Mat yuv_image;
    //智能指针，是不需要销毁的指针。本驱动所有图片均由智能指针来访问，访问方式和普通指针相同，例如： *depth_image
    pcl::visualization::CloudViewer viewer("viewer");                              //初始化点云查看器
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_red(new pcl::PointCloud<pcl::PointXYZ>); //初始化点云指针
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_blue(new pcl::PointCloud<pcl::PointXYZ>); //初始化点云指针
    pcl::PointXYZ p;
    std::string result;
    clock_t start,end;
    std::vector<double> fps_nums; // 帧率计算相关
    double fps_sum = 0; 
    double fps_num; 

    while (!viewer.wasStopped())
    {
        start = clock();
        result = cam.grab();

        if (result == "eof")
        {
            std::cout << "视频结束！" << std::endl;
            break;
        }
        else if (result == "timeout")
        {
            std::cout << "抓图超时！" << std::endl;
            continue;
        }
        else if (result == "failed")
        {
            std::cout << "抓图出现未知错误" << std::endl;
            continue;
        }
        else if (result == "init")
        {
            std::cout << "初始化未成功" << std::endl;
            continue;
        }
        std::cout << "开始抓图！" << std::endl;

        //你需要获得这两个图片，来方便你进行操作
        depth_image = cam.getDepthImage(); ///获取与深度图
        color_image = cam.getAlignedColorImage();   ///获取与深度图对齐的彩色图


        //resize(*depth_image, *depth_image,cv::Size(),0.5, 0.5);
        //resize(*color_image, *color_image,cv::Size(),0.5, 0.5);
        //auto cloud_All = cam.getPointcloud();     ///获取总的点云，此操作用openmp加速了，但这个整体的点云信息并没有什么用...

        /*
        /////以下为重要的内容
        /////一个从彩色图像素点转换到对应3D坐标点的例程
        /////提示：依据找到的红/蓝连通域的位置信息，可以循环调用这个功能，最终你将获得一个点云～
        */
        //std::cout << "kinect!" << std::endl;
        hitcrt::Kinectv3::Point flatPoint; //这个驱动特有的三维点的类型
        int col_num = (*color_image).cols;
        int row_num = ((*color_image).rows/4)*3;
        
        cv::cvtColor(*color_image, yuv_image, cv::COLOR_BGR2YUV); //转为YUV格式，在UV通道分辨颜色

        for(int row=0 ; row < row_num ; row+=2){
            for(int col=0 ; col < col_num ; col+=2){
                if ((float)(*depth_image).at<short>(row, col) <= 0 || (float)(*depth_image).at<short>(row, col) >= 2500.0)
                {        // 使用short访问深度图中的深度值的信息
                    //贴心地帮您去掉丢深度信息点或者深度信息太远的点的深度值，深度信息单位为毫米mm
                }
                else
                {
                    if(((yuv_image).at<cv::Vec3b>(row, col)[2]) > 170){
                        cam.get3dPoint(hitcrt::Kinectv3::Point((float)col, (float)row, (float)(*depth_image).at<short>(row, col)), flatPoint);
                        //从深度图图片的像素位置索引及其深度大小来获取三维点位置信息。
                        // flatPoint当中装的是3D点，3D点的单位为米m


                        p.x = flatPoint.x;
                        p.y = flatPoint.y;
                        p.z = flatPoint.z;
                    
                        cloud_red->points.push_back(p); //将点p保存到点云当中
                    }
                    if(((yuv_image).at<cv::Vec3b>(row, col)[1]) > 150){
                        cam.get3dPoint(hitcrt::Kinectv3::Point((float)col, (float)row, (float)(*depth_image).at<short>(row, col)), flatPoint);
                        //从深度图图片的像素位置索引及其深度大小来获取三维点位置信息。
                        // flatPoint当中装的是3D点，3D点的单位为米m

                        p.x = flatPoint.x;
                        p.y = flatPoint.y;
                        p.z = flatPoint.z;
                    
                        cloud_blue->points.push_back(p); //将点p保存到点云当中
                    }
                }
            }
        }
        //显示抓图获取的图像的功能
        //以下的操作仅供参考
        //if ((cloud_red != NULL)&&(cloud_blue != NULL))
        //{
            //std::vector<cv::Point2f> p2d;
            //cam.get2dPoints(cloud_All, p2d); ///从点云变换为像素坐标，此操作用openmp加速了
            // std::cout<<"3d to 2d average calc time:"<<totaltime1/count<<"ms"<<std::endl;

            //viewer.showCloud(cloud_red);
        //}
#if NON_OPTIMIZE
        if (color_image != NULL && !color_image->empty())
        {
            uint64_t time = cam.getColorTime(); ///彩色图采样时间，为设备时间即距相机打开时的时间，单位ns
            // std::cout<<"color time:"<<time<<std::endl;

            cv::imshow("color", *color_image);
        }
        if (depth_image != NULL && !depth_image->empty())
        {
            uint64_t time = cam.getDepthTime(); ///深度图采样时间，为设备时间即距相机打开时的时间，单位ns
            // std::cout<<"depth time:"<<time<<std::endl;
            cv::Mat depthshow;
            depth_image->convertTo(depthshow, CV_8UC1);
            cv::imshow("depth", depthshow);
            
        }
#endif
        char key = cv::waitKey(1);
        if (key == 27 || key == 'q')
        {
            //按"q"退出
            break;
        }
        if (key == 'p')
        {
            //按"p"暂停
            cv::waitKey(0);
        }
        cloud_red->points.clear(); //清除每一帧中cloud的数据，防止对下一帧的数据造成干扰
        cloud_blue->points.clear(); //清除每一帧中cloud的数据，防止对下一帧的数据造成干扰


        end = clock();
        fps_num = (CLOCKS_PER_SEC/(float)(end-start));
        fps_nums.push_back(fps_num); // 将浮点数添加到nums中
        fps_sum += fps_num; // 将浮点数累加到sum中
        //std::cout<<"FPS: "<< fps_num <<std::endl;
        std::cout << "Avg_FPS = " << (fps_sum / fps_nums.size()) << "   fps_num: " <<fps_nums.size()<< std::endl;
    }
}
