#include "Kinectv3.h"

#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <iostream>
#include <memory>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>

#include <pcl/ModelCoefficients.h>
#include <pcl/features/normal_3d.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/passthrough.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
#define NON_OPTIMIZE 0 

#if NON_OPTIMIZE
#endif


hitcrt::Kinectv3 cam("斜搭.mkv"); /// Global camera object; opens the mkv recording at the given path.




// Fit a cylinder with radius in [minRadius, maxRadius] to `input` using
// RANSAC over points and their estimated surface normals. Inlier points
// are copied into `output` and the fitted model into `coefficients`.
void cylinder_segmentation(pcl::PointCloud<pcl::PointXYZ>::Ptr input,
                    pcl::PointCloud<pcl::PointXYZ>::Ptr output,
                    pcl::ModelCoefficients::Ptr coefficients,
                    float minRadius,float maxRadius){
    // Step 1: estimate a normal for every point via k-nearest-neighbour search.
    pcl::search::KdTree<pcl::PointXYZ>::Ptr kd_tree(new pcl::search::KdTree<pcl::PointXYZ>());
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimator;
    normal_estimator.setInputCloud(input);
    normal_estimator.setSearchMethod(kd_tree);  // k-d tree neighbour lookup
    normal_estimator.setKSearch(50);            // 50 nearest neighbours per normal
    normal_estimator.compute(*normals);

    // Step 2: RANSAC cylinder segmentation driven by the normals.
    pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> segmenter;
    segmenter.setOptimizeCoefficients(true);
    segmenter.setModelType(pcl::SACMODEL_CYLINDER);  // cylinder model
    segmenter.setMethodType(pcl::SAC_RANSAC);
    segmenter.setNormalDistanceWeight(0.1);  // weight of normal agreement in the fit
    segmenter.setMaxIterations(10000);       // RANSAC iteration cap
    segmenter.setDistanceThreshold(0.01);    // inlier distance threshold (metres)
    segmenter.setRadiusLimits(minRadius, maxRadius);
    segmenter.setInputCloud(input);
    segmenter.setInputNormals(normals);

    pcl::PointIndices::Ptr inliers(new pcl::PointIndices);
    segmenter.segment(*inliers, *coefficients);

    // Step 3: keep only the inlier points in the output cloud.
    pcl::copyPointCloud(*input, inliers->indices, *output);
}
// Project the 3D points of `input` into image coordinates and draw the
// axis-aligned bounding rectangle of the projected points on `color_image`
// (green, 2 px). No-op when there is nothing to project or draw on.
void draw_rectangle(pcl::PointCloud<pcl::PointXYZ>::Ptr input,
                    std::shared_ptr<cv::Mat> color_image){
            if (!input || input->points.empty() || !color_image || color_image->empty()) {
                return; // nothing to project / nothing to draw on
            }
            std::vector<cv::Point2f> p2d;
            cam.get2dPoints(input, p2d); /// cloud -> pixel coordinates (OpenMP-accelerated in the driver)
            // BUG FIX: the original read p2d[0] unconditionally, which is
            // undefined behaviour when the projection yields no points.
            if (p2d.empty()) {
                return;
            }
            // Find the bounding box of the projected 2D points.
            float minX = p2d[0].x;
            float minY = p2d[0].y;
            float maxX = p2d[0].x;
            float maxY = p2d[0].y;
            for (const cv::Point2f& point : p2d) {
                minX = std::min(minX, point.x);
                minY = std::min(minY, point.y);
                maxX = std::max(maxX, point.x);
                maxY = std::max(maxY, point.y);
            }
            cv::Point topLeft(minX, minY);
            cv::Point bottomRight(maxX, maxY);
            // Draw the rectangle on the image (caller decides when to show it).
            cv::rectangle(*color_image, topLeft, bottomRight, cv::Scalar(0, 255, 0), 2);
}


// Pipeline entry point: grab aligned depth/colour frames from the recording,
// build red/blue point clouds by YUV thresholding, fit a cylinder to each,
// draw bounding boxes on the colour image, and print FPS / success stats.
int main()
{
    std::cout << "进入主函数！" << std::endl;
    std::shared_ptr<cv::Mat> depth_image;
    std::shared_ptr<cv::Mat> color_image;

    cv::Mat yuv_image;
    // Smart pointers need no manual delete; all images from this driver are
    // accessed through shared_ptr exactly like raw pointers, e.g. *depth_image.
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_red(new pcl::PointCloud<pcl::PointXYZ>); // per-frame cloud of red-classified pixels
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_blue(new pcl::PointCloud<pcl::PointXYZ>); // per-frame cloud of blue-classified pixels
    pcl::PointXYZ p;
    std::string result;
    int red_success = 0;
    int blue_success = 0;

#if NON_OPTIMIZE
    pcl::visualization::CloudViewer viewer("viewer");  // point-cloud viewer (debug builds only)
#endif
    clock_t start,end;
    // NOTE(review): clock() measures CPU time, not wall time, so the FPS figures
    // below over-report whenever the driver uses multiple threads — confirm intent.
    std::vector<double> fps_nums; // per-frame FPS samples; note: grows unbounded, only size+sum are used
    double fps_sum = 0; 
    double fps_num; 

    while (true)
    {
        start = clock();
        result = cam.grab();

        if (result == "eof")
        {
            std::cout << "视频结束！" << std::endl;
            break;
        }
#if NON_OPTIMIZE
        else if (result == "timeout")
        {
            std::cout << "抓图超时！" << std::endl;
            continue;
        }
        else if (result == "failed")
        {
            std::cout << "抓图出现未知错误" << std::endl;
            continue;
        }
        else if (result == "init")
        {
            std::cout << "初始化未成功" << std::endl;
            continue;
        }
#endif
        // Fetch the two frames the rest of the loop operates on.
        depth_image = cam.getDepthImage(); /// depth frame
        color_image = cam.getAlignedColorImage();   /// colour frame aligned to the depth frame

        /*
        ///// Core routine of this section:
        ///// map a colour-image pixel to its corresponding 3D coordinate.
        ///// Looping it over the red/blue pixels yields one point cloud per colour.
        */
        hitcrt::Kinectv3::Point flatPoint; // driver-specific 3D point type
        int col_num = (*color_image).cols;
        int row_num = ((*color_image).rows/4)*3;// bottom quarter of the depth image carries no useful data
        
        cv::cvtColor(*color_image, yuv_image, cv::COLOR_BGR2YUV); // convert to YUV; colours are separated on the U/V channels

        for(int row=0 ; row < row_num ; row+=2){    // stride-2 downsample: trade precision for speed
            for(int col=0 ; col < col_num ; col+=2){    
                if ((float)(*depth_image).at<short>(row, col) <= 0 || (float)(*depth_image).at<short>(row, col) >= 2500.0)
                {        // depth values are accessed as short, in millimetres (mm)
                    // skip pixels with missing depth or depth beyond 2.5 m
                }
                else
                {
                    if(((yuv_image).at<cv::Vec3b>(row, col)[2]) > 170){// V channel > 170 -> red pixel
                        cam.get3dPoint(hitcrt::Kinectv3::Point((float)col, (float)row, (float)(*depth_image).at<short>(row, col)), flatPoint);
                        // Convert pixel position + depth into a 3D point;
                        // flatPoint holds the result, in metres (m).
                        p.x = flatPoint.x;
                        p.y = flatPoint.y;
                        p.z = flatPoint.z;
                    
                        cloud_red->points.push_back(p); // append the point to the red cloud
                    }
                    if(((yuv_image).at<cv::Vec3b>(row, col)[1]) > 150){// U channel > 150 -> blue pixel
                        cam.get3dPoint(hitcrt::Kinectv3::Point((float)col, (float)row, (float)(*depth_image).at<short>(row, col)), flatPoint);
                        // Convert pixel position + depth into a 3D point;
                        // flatPoint holds the result, in metres (m).
                        p.x = flatPoint.x;
                        p.y = flatPoint.y;
                        p.z = flatPoint.z;
                    
                        cloud_blue->points.push_back(p); // append the point to the blue cloud
                    }
                }
            }
        }


        pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud_blue(new pcl::PointCloud<pcl::PointXYZ>);
        pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud_red(new pcl::PointCloud<pcl::PointXYZ>);
        std::cout<<"cloud_blue has : "<<cloud_blue->points.size()<<"points"<<std::endl;
        std::cout<<"cloud_red has : "<<cloud_red->points.size()<<"points"<<std::endl;
        bool k = false; // set when at least one cylinder was found and drawn
        if (cloud_blue->points.size() > 20){
            // Blue barrels: radius limits 0.10-0.20 m.
            pcl::ModelCoefficients::Ptr coefficients_cylinder_blue(new pcl::ModelCoefficients);
            cylinder_segmentation(cloud_blue,output_cloud_blue,coefficients_cylinder_blue,0.1,0.2);
    	    //std::cerr << "Cylinder_Blue: " << *coefficients_cylinder_blue << std::endl;
            if(output_cloud_blue->points.size() > 5){
                draw_rectangle(output_cloud_blue,color_image);
                k = true;
                blue_success++;
            }
        }
        if (cloud_red->points.size() > 20){
            // Red barrels: radius limits 0.15-0.30 m.
            pcl::ModelCoefficients::Ptr coefficients_cylinder_red(new pcl::ModelCoefficients);
            cylinder_segmentation(cloud_red,output_cloud_red,coefficients_cylinder_red,0.15,0.3);
    	    //std::cerr << "Cylinder_Red: " << *coefficients_cylinder_red << std::endl;
            if(output_cloud_red->points.size() > 5){
                draw_rectangle(output_cloud_red,color_image);
                k = true;
                red_success++;
            }
        }
        if(k){cv::imshow("Image with Rectangle", *color_image);}
        

#if 0// legacy, un-refactored blue-detection code (superseded by cylinder_segmentation above)
        std::cout<<"cloud_blue has : "<<cloud_blue->points.size()<<"points"<<std::endl;
        pcl::PointCloud<pcl::Normal>::Ptr cloud_normals_blue(new pcl::PointCloud<pcl::Normal>);
    	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne_blue;
    	pcl::search::KdTree<pcl::PointXYZ>::Ptr tree_blue(new pcl::search::KdTree<pcl::PointXYZ>());

    	pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> seg_blue;
   		ne_blue.setInputCloud(cloud_blue);
    	ne_blue.setSearchMethod(tree_blue);  // search method
    	ne_blue.setKSearch (50);
    	ne_blue.compute(*cloud_normals_blue);
		
		pcl::PointCloud<pcl::PointXYZ>::Ptr output_cloud_blue(new pcl::PointCloud<pcl::PointXYZ>);
    	pcl::ModelCoefficients::Ptr coefficients_cylinder_blue(new pcl::ModelCoefficients);
    	pcl::PointIndices::Ptr inliers_cylinder_blue(new pcl::PointIndices);
    	seg_blue.setOptimizeCoefficients(true);
    	seg_blue.setModelType(pcl::SACMODEL_CYLINDER);  // cylinder model
    	seg_blue.setMethodType(pcl::SAC_RANSAC);
    	seg_blue.setNormalDistanceWeight(0.1);  // weight of normals in the estimate
    	seg_blue.setMaxIterations(10000);       // iteration cap
    	seg_blue.setDistanceThreshold(0.01);
    	seg_blue.setRadiusLimits(0.1, 0.2);
    	seg_blue.setInputCloud(cloud_blue);
    	seg_blue.setInputNormals(cloud_normals_blue);
    	seg_blue.segment(*inliers_cylinder_blue, *coefficients_cylinder_blue);
    	pcl::copyPointCloud(*cloud_blue, inliers_cylinder_blue->indices, *output_cloud_blue);
    	std::cerr << "Cylinder_Blue: " << *coefficients_cylinder_blue << std::endl;

        if (output_cloud_blue->points.size() > 10)
        {
            std::vector<cv::Point2f> p2d;
            cam.get2dPoints(output_cloud_blue, p2d); /// cloud -> pixel coordinates (OpenMP-accelerated)
            // std::cout<<"3d to 2d average calc time:"<<totaltime1/count<<"ms"<<std::endl;
            viewer.showCloud(output_cloud_blue);
            float minX = p2d[0].x;
            float minY = p2d[0].y;
            float maxX = p2d[0].x;
            float maxY = p2d[0].y;
            for (const cv::Point2f& point : p2d) {// Iterate through the vector to find the min and max values
                if (point.x < minX) {minX = point.x;}
                if (point.y < minY) {minY = point.y;}
                if (point.x > maxX) {maxX = point.x;}
                if (point.y > maxY) {maxY = point.y;}
            }
            // Output the min and max values
            std::cout << "Min X: " << minX << std::endl;
            std::cout << "Min Y: " << minY << std::endl;
            std::cout << "Max X: " << maxX << std::endl;
            std::cout << "Max Y: " << maxY << std::endl;
            cv::Point topLeft(minX, minY);
            cv::Point bottomRight(maxX, maxY);
            // Draw the rectangle on the image
            cv::rectangle(*color_image, topLeft, bottomRight, cv::Scalar(0, 255, 0), 2); // Replace cv::Scalar with your desired color and thickness
            // Show the image with the rectangle
            cv::imshow("Image with Rectangle", *color_image);
        }
#endif


#if NON_OPTIMIZE
        if (color_image != NULL && !color_image->empty())
        {
            uint64_t time = cam.getColorTime(); /// colour-frame timestamp: device time since camera open, in ns
            // std::cout<<"color time:"<<time<<std::endl;

            cv::imshow("color", *color_image);
        }
        if (depth_image != NULL && !depth_image->empty())
        {
            uint64_t time = cam.getDepthTime(); /// depth-frame timestamp: device time since camera open, in ns
            // std::cout<<"depth time:"<<time<<std::endl;
            cv::Mat depthshow;
            depth_image->convertTo(depthshow, CV_8UC1);
            cv::imshow("depth", depthshow);
            
        }
#endif
        char key = cv::waitKey(1);
        if (key == 27 || key == 'q')
        {
            // press "q" (or ESC) to quit
            break;
        }
        if (key == 'p')
        {
            // press "p" to pause until the next key press
            cv::waitKey(0);
        }
        cloud_red->points.clear(); // reset per-frame cloud so it doesn't contaminate the next frame
        cloud_blue->points.clear(); // reset per-frame cloud so it doesn't contaminate the next frame

        end = clock();
        fps_num = (CLOCKS_PER_SEC/(float)(end-start));
        fps_nums.push_back(fps_num); // record this frame's FPS sample
        fps_sum += fps_num; // running total for the average below
        //std::cout<<"FPS: "<< fps_num <<std::endl;
        std::cout << "Avg_FPS = " << (fps_sum / fps_nums.size()) << "   fps_num: " <<fps_nums.size()<< std::endl;
        std::cout << "red_success = " << ((float)red_success/fps_nums.size()) << "  blue_success = " << ((float)blue_success/fps_nums.size())<< std::endl;
    }
    
}
