#include "Cotton.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <opencv2/imgproc/imgproc.hpp>
#include <librealsense2/rs.hpp>
#include <chrono>
#include <ros/ros.h>
#include <geometry_msgs/Polygon.h>
#include <geometry_msgs/PolygonStamped.h>
#include <geometry_msgs/Point32.h>

#include <sstream>
using namespace cv;
using namespace std;


// Robust depth estimate (in the depth map's own units, here mm) at pixel
// (c_x, c_y): collects the non-zero depths inside a window around the
// center, sorts them ascending, and averages the slice of samples that
// starts at fraction k of the sorted list and spans 1/N of it.
//
// Parameters:
//   depth_map            CV_32FC1 depth image; 0 marks invalid/NaN pixels.
//   c_x, c_y             window center in pixel coordinates.
//   win_size_x/_y        window extent in pixels (clamped to image bounds).
//   N                    the sorted samples are conceptually split into N
//                        parts; one part's worth of samples is averaged.
//   k                    fraction (0..1) into the sorted samples where the
//                        averaged slice begins (0.5 => around the median).
// Returns 0.0 when the window contains no valid (non-zero) depth.
float filterd_depth(Mat& depth_map , float c_x , float c_y ,  int win_size_x ,int win_size_y , int N=10  ,float k=.5 ){

    // Step 1: build a window around the center, clamped to the image.
    int win_lt_x = (int(c_x) >= win_size_x) ? (int(c_x) - win_size_x) : 0;   // left-top x
    int win_lt_y = (int(c_y) >= win_size_y) ? (int(c_y) - win_size_y) : 0;   // left-top y
    int win_rb_x = (win_lt_x + win_size_x < depth_map.cols) ? (win_lt_x + win_size_x) : (depth_map.cols - 1);
    int win_rb_y = (win_lt_y + win_size_y < depth_map.rows) ? (win_lt_y + win_size_y) : (depth_map.rows - 1);
    Rect win(win_lt_x, win_lt_y, win_rb_x - win_lt_x, win_rb_y - win_lt_y);
    if (win.width <= 0 || win.height <= 0)
        return 0.0f;   // degenerate window (center at/beyond the image edge)

    // Flatten the window into a 1 x (w*h) row vector of depths.
    Mat depths_in_win;
    depth_map(win).copyTo(depths_in_win);
    depths_in_win = depths_in_win.reshape(0, 1);

    // Filter out zero values, which encode NaN / invalid depth.
    vector<Point> locations;   // locations of non-zero pixels in the row
    cv::findNonZero(depths_in_win, locations);
    if (locations.empty())
        return 0.0f;

    // Step 2: gather the valid depths into a row vector and sort ascending.
    // NOTE: a row vector + SORT_EVERY_ROW is required here — sorting an
    // N x 1 column with the default SORT_EVERY_ROW flag is a no-op.
    int total = (int)locations.size();
    Mat nonZeros(1, total, CV_32FC1);
    for (int i = 0; i < total; i++)
        nonZeros.at<float>(0, i) = depths_in_win.at<float>(locations[i]);
    cv::sort(nonZeros, nonZeros, SORT_EVERY_ROW | SORT_ASCENDING);

    // Step 3: average the slice [k*total, k*total + total/N), clamped so it
    // always contains at least one sample (avoids a 0/0 NaN on tiny windows).
    int start_idx = (int)(total * 1. * k);
    if (start_idx >= total) start_idx = total - 1;
    int end_idx = start_idx + (int)(total * 1. / N);
    if (end_idx <= start_idx) end_idx = start_idx + 1;
    if (end_idx > total)      end_idx = total;

    float mean_depth = 0.0f;
    for (int i = start_idx; i < end_idx; i++) {
        mean_depth += nonZeros.at<float>(0, i);
    }
    return mean_depth / (end_idx - start_idx);   // slice mean depth

}



int main(int argc, char * argv[]) try
{
    // camera parameters info 
	const double camera_fx = 914.997314453125;
	const double camera_fy = 914.7178955078125;
	const double camera_cx = 643.2010498046875;
	const double camera_cy = 357.0489807128906;
	const double camera_factor = 1.;  

    string cfg_file = "/home/nvidia/code/catkin_ws/src/cotton_detect/src/config-cotton.yaml";
    Cotton2 Cotton2(cfg_file);
    Cotton2.LoadEngine();

    ros::init(argc, argv, "image_publisher");
    ros::NodeHandle nh;
    ros::Publisher position=nh.advertise<geometry_msgs::PolygonStamped>("object_position",1000);  // detect result Publisher
    // image_transport::ImageTransport it(nh);
    // image_transport::Publisher depth_pub = it.advertise("depth_image", 1);
    // image_transport::Publisher color_pub = it.advertise("color_image", 1);    

    rs2::pipeline pipe;
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Z16, 15);
    cfg.enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_RGB8, 15);
    pipe.start(cfg);
    rs2::align align_to_color(RS2_STREAM_COLOR);

    // rs2::decimation_filter dec_filter;  // Decimation - reduces depth frame density
    // dec_filter.set_option(RS2_OPTION_FILTER_MAGNITUDE, 8);
    rs2::hole_filling_filter hol_fillinf_filter; 
    hol_fillinf_filter.set_option(RS2_OPTION_HOLES_FILL, 0); // Use the value from the left neighbor pixel to fill the hole
    
    int win_size_x = 10;
    int win_size_y = 10;
    const char* img_win = "RGB_Image";
    namedWindow(img_win, WINDOW_AUTOSIZE);

    int index = 0;
    ros::Rate loop_rate(30);    
    while (nh.ok()) {

        // Block program until frames arrive
        rs2::frameset frameset = pipe.wait_for_frames();

        // Align all frames to color viewport
        frameset = align_to_color.process(frameset);
    
        // Try to get a frame of a depth image
        auto depth = frameset.get_depth_frame();
        auto color = frameset.get_color_frame();
	
	cout << depth<< std::endl;
	//waitKey(0);

        if (!depth || !color) 
            break;
        rs2::frame filtered = depth;
        //filtered = dec_filter.process(filtered);
        filtered = hol_fillinf_filter.process(filtered);   
        // cv::Mat image(cv::Size(1280, 720), CV_8UC3, (void*)color.get_data(), cv::Mat::AUTO_STEP); 
        // cv::Mat depthmat(cv::Size(1280, 720), CV_16U, (void*)filtered.get_data(), cv::Mat::AUTO_STEP); 
        cv::Mat image(cv::Size(1280, 720), CV_8UC3, (void*)color.get_data(), cv::Mat::AUTO_STEP); 
        cv::Mat ori_depthmat(cv::Size(1280, 720), CV_16U, (void*)filtered.get_data(), cv::Mat::AUTO_STEP);  // CV_32FC1
	Mat depthmat;
	ori_depthmat.convertTo(depthmat,CV_32FC1);
	
        //cout << depthmat<< std::endl;
        if(!image.data)
            std::cerr << "Problem loading image!!!" << std::endl;

        std::vector<cv::Point2f> object_number;
        geometry_msgs::PolygonStamped msg_object;
        geometry_msgs::Point32 point_detect;
        // start do inference
        Cotton2.InferenceFolder_single2(image, object_number);
	std::vector<float> filter_depth;
        for (size_t i = 0; i < object_number.size(); i++)
        {
            float c_x = object_number[i].x;
            float c_y = object_number[i].y;

          // get filtered depth from depthmat 
           // float d = depthmat.at<float>(static_cast<int>(c_x),static_cast<int>(c_y) );
	    // if ( d <0.00001  ){} 		


           float mean_depth  = filterd_depth(depthmat,c_x,c_y,win_size_x,win_size_y);

	  // turn bbox image-coordinates into camera-coordinates 
	     float x_camera = 	mean_depth / camera_factor;
             float y_camera = - (c_x - camera_cx ) * x_camera / camera_fx ; 
	     float z_camera = 	- (c_y - camera_cy ) * x_camera / camera_fy ; 

            point_detect.x =  x_camera/1000; //object_number[i].x;
            point_detect.y =  y_camera/1000; // object_number[i].y;
            point_detect.z =  z_camera/1000; // mean_depth;
            
		
		
            msg_object.polygon.points.push_back(point_detect);
		
		// draw result
           cv::circle(image, cv::Point(int(c_x), int(c_y)), 3, (0, 0, 255), 3);
	   std::cout<< "mean depth : " << point_detect.x << std::endl;
	  std::cout<< "point_detect.y : " << point_detect.y << std::endl;
	  std::cout<< "point_detect.z : " << point_detect.z << std::endl;
	   std::stringstream ss; 
           ss<< mean_depth; 
	   std::string str_depth =  ss.str();
	 cv::putText(image, str_depth , cv::Point(int(c_x+10), int(c_y+10)) ,FONT_HERSHEY_SIMPLEX,2,Scalar(0,0,255),1,2,false)  ;
        }

	imshow(img_win , image);
	waitKey(0);	

	// frame id
        msg_object.header.frame_id = "object_info";
        position.publish(msg_object);  // publish

        ros::spinOnce(); 
        loop_rate.sleep();
    }

    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception& e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}



