// STL
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
// ROS
#include <ros/ros.h>
#include <std_msgs/String.h>
// OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cv_bridge/cv_bridge.h>
#include <image_transport/image_transport.h>
// User
#include "OpenNI.h"
#include "darknet.hpp"
#include "common_msg/DetectedObject.h"
#include "common_msg/DetectedObjects.h"

// static variables
static cv::VideoCapture capture;         // webcam color stream (OpenCV)
static openni::Device device;            // OpenNI depth device handle
static openni::VideoStream depth_stream; // depth stream created on `device`
static openni::VideoFrameRef depth_frame;// most recently read depth frame (owns the pixel buffer)

static cv::Mat color_img(480, 640, CV_8UC3);                 // latest color frame, BGR 640x480
static cv::Mat depth_img(240, 320, CV_16UC1, cv::Scalar(0)); // latest raw depth, 320x240 (presumably millimetres — confirm sensor config)
static cv::Mat depth_dis_img(240, 320, CV_8UC1);             // 8-bit scaled depth, display only

// max wait for a depth frame in waitForAnyStream(), in ms
#define OPENNI_READ_WAIT_TIMEOUT 50
// scales raw depth to 8-bit for display: value 8000 maps to 255
const double alpha = 255.0 / 8000.0;

// Draw detection results onto mat_img: a colored rectangle per object, a filled
// label background above it, the class name (plus track id when present), and
// the 3D coordinates when the detector provided them.
// mat_img:    BGR image, drawn on in place (cv::Mat headers share pixel data,
//             so pass-by-value still modifies the caller's image).
// result_vec: darknet detections (pixel-space boxes).
// obj_names:  class-id -> name table; ids outside the table get box only.
// current_det_fps / current_cap_fps: unused (the FPS overlay was removed),
//             kept with defaults for interface compatibility.
void draw_boxes(cv::Mat mat_img, std::vector<bbox_t> result_vec, std::vector<std::string> obj_names,
                int current_det_fps = -1, int current_cap_fps = -1)
{
    for (auto &det : result_vec)
    {
        cv::Scalar color = obj_id_to_color(det.obj_id);
        cv::rectangle(mat_img, cv::Rect(det.x, det.y, det.w, det.h), color, 2);
        if (obj_names.size() > det.obj_id)
        {
            std::string obj_name = obj_names[det.obj_id];
            if (det.track_id > 0)
                obj_name += " - " + std::to_string(det.track_id);

            // The label background must span at least the box width (+2 px) or
            // the rendered text width, whichever is larger.
            cv::Size const text_size = getTextSize(obj_name, cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, 2, 0);
            int max_width = std::max(text_size.width, (int)det.w + 2);

            std::string coords_3d;
            if (!std::isnan(det.z_3d))
            {
                std::stringstream ss;
                ss << std::fixed << std::setprecision(2) << "x:" << det.x_3d << "m y:" << det.y_3d << "m z:" << det.z_3d << "m ";
                coords_3d = ss.str();
                // 3D text may be wider than the name; widen the background to fit.
                cv::Size const text_size_3d = getTextSize(ss.str(), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, 1, 0);
                max_width = std::max(max_width, std::max(text_size_3d.width, (int)det.w + 2));
            }

            // Filled label background just above the box, clamped to the image.
            cv::rectangle(mat_img, cv::Point2f(std::max((int)det.x - 1, 0), std::max((int)det.y - 35, 0)),
                          cv::Point2f(std::min((int)det.x + max_width, mat_img.cols - 1), std::min((int)det.y, mat_img.rows - 1)),
                          color, CV_FILLED, 8, 0);
            putText(mat_img, obj_name, cv::Point2f(det.x, det.y - 16), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(0, 0, 0), 2);
            if (!coords_3d.empty())
                putText(mat_img, coords_3d, cv::Point2f(det.x, det.y - 1), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(0, 0, 0), 1);
        }
    }
}

// Print one line per detection to stdout: optional class name, then id, pixel
// box and probability. frame_id >= 0 additionally prints a frame header line.
// Vectors are taken by const reference — the original copied both on every call.
// Note: setprecision(3) intentionally matches the original output format (and,
// as before, leaves std::cout's precision changed afterwards).
void show_console_result(std::vector<bbox_t> const &result_vec, std::vector<std::string> const &obj_names, int frame_id = -1)
{
    if (frame_id >= 0)
        std::cout << " Frame: " << frame_id << std::endl;
    for (auto &i : result_vec)
    {
        if (obj_names.size() > i.obj_id)
            std::cout << obj_names[i.obj_id] << " - ";
        std::cout << "obj_id = " << i.obj_id << ",  x = " << i.x << ", y = " << i.y
                  << ", w = " << i.w << ", h = " << i.h
                  << std::setprecision(3) << ", prob = " << i.prob << std::endl;
    }
}

// Load class names from `filename`, one name per line (order defines class id).
// Returns an empty vector when the file cannot be opened; otherwise prints a
// confirmation to stdout. The filename is taken by const reference — the
// original copied the string on every call.
std::vector<std::string> objects_names_from_file(std::string const &filename)
{
    std::vector<std::string> file_lines;
    std::ifstream file(filename);
    if (!file.is_open())
        return file_lines;
    for (std::string line; std::getline(file, line);)
        file_lines.push_back(line);
    std::cout << "object names loaded \n";
    return file_lines;
}

// Grab the next webcam frame into the global color_img.
// Returns 0 on success, -1 when no frame could be read (message on stdout).
int get_color_image()
{
    capture >> color_img;
    if (!color_img.empty())
        return 0;
    std::cout << "get color image failed!" << std::endl;
    return -1;
}

// Wait for and read one depth frame into the globals depth_frame / depth_img,
// and refresh the 8-bit display image depth_dis_img.
// Returns 0 on success, 1 on wait timeout or unexpected stream index,
// 2 when readFrame fails.
int get_depth_image()
{
    int streamIndex;
    openni::VideoFrameRef frame; // NOTE(review): unused local — readFrame fills the global depth_frame

    // Block until the depth stream has data, up to OPENNI_READ_WAIT_TIMEOUT ms.
    openni::VideoStream *pStream[] = {&depth_stream};
    openni::Status rc = openni::OpenNI::waitForAnyStream(pStream, 1, &streamIndex, OPENNI_READ_WAIT_TIMEOUT);
    if (rc != openni::STATUS_OK)
    {
        std::cout << "Wait stream failed! Timeout is " << OPENNI_READ_WAIT_TIMEOUT << " ms" << std::endl;
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
        return 1;
    }

    // Only one stream is registered, so only index 0 is valid.
    switch (streamIndex)
    {
    case 0:
        rc = depth_stream.readFrame(&depth_frame);
        break;
    default:
        std::cout << "Wait frame error! Stream index: " << streamIndex << std::endl;
        return 1;
    }

    if (rc != openni::STATUS_OK)
    {
        std::cout << "Read failed!" << std::endl;
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
        return 2;
    }

    if (depth_frame.isValid())
    {
        // Wrap the frame's pixel buffer in a Mat header WITHOUT copying.
        // depth_img therefore aliases depth_frame's buffer — presumably valid
        // until the next readFrame on this stream; TODO confirm OpenNI lifetime.
        openni::DepthPixel *pDepth = (openni::DepthPixel *)depth_frame.getData();
        depth_img = cv::Mat(depth_frame.getHeight(), depth_frame.getWidth(), CV_16UC1, (unsigned char *)pDepth);
        // Scale to 8 bits for display (alpha maps raw value 8000 -> 255).
        depth_img.convertTo(depth_dis_img, CV_8UC1, alpha);
        // cout << "depth_img: " << depth_frame.getWidth() << " x " << depth_frame.getHeight() << endl;
    }

    return 0;
}

// Initialize OpenNI, open the depth device/stream, then open the webcam color
// stream. Returns 0 on success, a distinct negative code per failed step.
// cam_id selects the webcam index (default 0 preserves the old behavior); if
// that index fails, index 1 (or 0) is tried as a fallback, as before.
int open_camera(int cam_id = 0)
{
    openni::Status rc = openni::OpenNI::initialize();
    if (rc != openni::STATUS_OK)
    {
        std::cout << "Init failed!" << std::endl;
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
        return -1;
    }

    rc = device.open(openni::ANY_DEVICE);
    if (rc != openni::STATUS_OK)
    {
        std::cout << "Couldn't open device!" << std::endl;
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
        return -2;
    }

    rc = depth_stream.create(device, openni::SENSOR_DEPTH);
    if (rc != openni::STATUS_OK)
    {
        std::cout << "Couldn't create depth stream!" << std::endl;
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
        return -3;
    }

    // or set Mirror=0 in orbbec.ini
    // depth.setMirroringEnabled(false);

    rc = depth_stream.start();
    if (rc != openni::STATUS_OK)
    {
        std::cout << "Couldn't start the depth stream!" << std::endl;
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
        return -4;
    }

    // Webcam: try the requested index first, then the other common index.
    if (!capture.open(cam_id))
    {
        capture.open(cam_id == 0 ? 1 : 0);
    }

    if (!capture.isOpened())
    {
        std::cout << "Couldn't start the color stream!" << std::endl;
        return -5;
    }

    return 0;
}

void close_camera()
{
    capture.release();
    device.close();
    openni::OpenNI::shutdown();
}

// Mouse callback for the "color" window: on left-button release, print the raw
// depth value under the cursor. Click coordinates come from the 640x480 color
// image while depth_img is 320x240, hence the halving before sampling.
void onMouseClick(int event, int x, int y, int flags, void *userdata)
{
    // cout << event << ", "<< x << ", " << y << endl;
    if (event != cv::EVENT_LBUTTONUP)
        return;
    std::cout << "Depth at " << x << ", " << y << " is " << depth_img.at<ushort>(y / 2, x / 2) << std::endl;
}

// Debug viewer: show live color and depth frames in OpenCV windows until the
// user presses ESC. Clicking the color window prints the depth under the cursor.
void show_cv_image()
{
    cv::namedWindow("color", cv::WINDOW_AUTOSIZE);
    cv::setMouseCallback("color", onMouseClick, NULL);
    for (;;)
    {
        get_color_image();
        cv::imshow("color", color_img);
        get_depth_image();
        cv::imshow("depth", depth_dis_img);
        if (cv::waitKey(30) == 27) // ESC quits
        {
            break;
        }
    }
    cv::destroyAllWindows();
}

// camera_frame: +X: right, +Y: down, +Z: forward
// navigation_frame: +X: forward, +Y: left, +Z: up
// Estimate the object's 3D position from the depth image and store it (in the
// navigation frame, same units as the depth values) in obj.position.
// box is in color-image pixels (640x480); depth_img is 320x240, so pixel
// coordinates are halved to sample depth and doubled again for back-projection.
void calc_obj_position(common_msg::DetectedObject &obj, bbox_t &box)
{
    // Camera intrinsic parameters for the 640x480 color stream.
    // NOTE(review): these look like stock Astra/Kinect VGA values — confirm
    // against the actual calibration. constexpr (was function-local static
    // const) removes the per-call init-guard; values are unchanged.
    constexpr float fx = 570.3422;
    constexpr float fy = 570.3422;
    constexpr float inv_fx = 1.0 / fx;
    constexpr float inv_fy = 1.0 / fy;
    constexpr float u0 = 319.5;
    constexpr float v0 = 239.5;

    // Object center in depth_img coordinates: (x + w/2) / 2, (y + h/2) / 2.
    int obj_c_x = (box.x >> 1) + (box.w >> 2);
    int obj_c_y = (box.y >> 1) + (box.h >> 2);

    // Average valid (non-zero) depth samples on a sparse 21x21 window around
    // the center, clamped to the 320x240 depth image.
    int yi_start = std::max(0, obj_c_y - 10);
    int yi_stop = std::min(239, obj_c_y + 10);
    int xi_start = std::max(0, obj_c_x - 10);
    int xi_stop = std::min(319, obj_c_x + 10);
    int zc_sum = 0, zc_temp = 0, zc_cnt = 0, zc = 0;
    for (int yi = yi_start; yi < yi_stop; yi += 3)
    {
        for (int xi = xi_start; xi < xi_stop; xi += 3)
        {
            zc_temp = depth_img.at<ushort>(yi, xi);
            if (zc_temp != 0) // zero means "no depth reading" here
            {
                zc_sum += zc_temp;
                zc_cnt++;
            }
        }
    }
    // At least two valid samples are required, presumably to reject isolated
    // noisy pixels; otherwise the depth is reported as 0 (unknown).
    if (zc_cnt > 1)
    {
        zc = zc_sum / zc_cnt;
    }
    else
    {
        zc = 0;
    }

    // Pin-hole back-projection from pixel to camera coordinates; (<< 1) maps
    // the depth-image center back to 640x480 pixel coordinates.
    int xc = int((float)zc * inv_fx * ((obj_c_x << 1) - u0));
    int yc = int((float)zc * inv_fy * ((obj_c_y << 1) - v0));

    // Camera frame -> navigation frame: (xc, yc, zc) -> (zc, -xc, -yc).
    obj.position.x = zc;
    obj.position.y = -xc;
    obj.position.z = -yc;
}

// darknet_ros_node: grabs color (webcam) + depth (OpenNI) frames, runs YOLO
// detection, and publishes the annotated frame ("detected_frame") and the
// per-object 3D positions ("detected_objects") at pub_rate Hz.
int main(int argc, char *argv[])
{
    // initialize darknet_ros_node
    ros::init(argc, argv, "darknet_ros_node");
    ros::NodeHandle nh("~");
    image_transport::ImageTransport it(nh);
    image_transport::Publisher imgPub = it.advertise("detected_frame", 5);
    ros::Publisher objPub = nh.advertise<common_msg::DetectedObjects>("detected_objects", 5);

    // load param (defaults point at the development machine's workspace)
    std::string base_dir = "/home/yg/Documents/smartgo_ws/src/darknet_ros/darknet/"; // yg
    // std::string base_dir = "/home/smartgo/Documents/cv_ws/src/darknet_ros/darknet/";  // smartgo
    std::string names_file, cfg_file, weights_file, frame_id;
    double thresh, fxy;
    int cam_id, pub_rate;
    nh.param<std::string>("names_file", names_file, base_dir + "smartgo.names");
    nh.param<std::string>("cfg_file", cfg_file, base_dir + "yolov4-tiny-smartgo.cfg");
    nh.param<std::string>("weights_file", weights_file, base_dir + "yolov4-tiny-smartgo.weights");
    nh.param<std::string>("frame_id", frame_id, "camera_link");
    nh.param<double>("thresh", thresh, 0.5f);
    nh.param<double>("fxy", fxy, 0.5f);
    nh.param<int>("cam_id", cam_id, 0); // NOTE(review): loaded but unused — open_camera() chooses the device index itself
    nh.param<int>("pub_rate", pub_rate, 10);

    // initialize camera; exit instead of looping on a dead device
    // (the original ignored this return value)
    if (open_camera() != 0)
    {
        std::cout << "open camera failed, exiting" << std::endl;
        return -1;
    }

    // grab one frame to learn the native frame size
    capture >> color_img;
    int video_fps = capture.get(cv::CAP_PROP_FPS);
    cv::Size const frame_size = color_img.size();
    std::cout << "Open webcam: " << cam_id << std::endl;
    std::cout << "Frame size: " << frame_size << std::endl;
    std::cout << "Frame rate: " << video_fps << std::endl;

    // initialize darknet
    std::vector<std::string> obj_names = objects_names_from_file(names_file);
    Detector detector(cfg_file, weights_file);

    // detection data
    std::shared_ptr<image_t> det_image;
    std::vector<bbox_t> result_vec;
    sensor_msgs::ImagePtr imgPtr;
    std_msgs::Header header;
    header.frame_id = frame_id;
    uint64_t seq = 0;
    common_msg::DetectedObject obj;
    common_msg::DetectedObjects objs;

    // ros loop
    cv::Mat draw_img;
    ros::Rate loop_rate(pub_rate);
    while (ros::ok())
    {
        // get image
        get_color_image();                                   // bgr8
        get_depth_image();                                   // 16UC1
        draw_img = color_img.clone();                        // bgr8
        det_image = detector.mat_to_image_resize(color_img); // bgr8

        // detect object and draw bounding box
        result_vec = detector.detect_resized(*det_image, frame_size.width, frame_size.height, thresh, true); // true
        draw_boxes(draw_img, result_vec, obj_names);

        // stamp the header BEFORE publishing so the image and the object list
        // share this iteration's seq/stamp (the original stamped afterwards,
        // so the image always carried the previous iteration's timestamp)
        header.seq = seq++;
        header.stamp = ros::Time::now();

        // publish detected frame (optionally rescaled by fxy)
        if (fxy < 0.9 || fxy > 1.1)
        {
            cv::resize(draw_img, draw_img, cv::Size(), fxy, fxy);
        }
        imgPtr = cv_bridge::CvImage(header, "bgr8", draw_img).toImageMsg();
        imgPub.publish(imgPtr);

        // publish detected objects
        objs.header = header;
        objs.detected_objects.clear();
        for (bbox_t &box : result_vec)
        {
            obj.obj_id = box.obj_id;
            // guard against class ids outside the names table (draw_boxes
            // bounds-checks; this loop previously did not)
            obj.obj_name = (box.obj_id < obj_names.size()) ? obj_names[box.obj_id] : std::string("unknown");
            obj.probability = box.prob;
            calc_obj_position(obj, box);
            objs.detected_objects.push_back(obj);
        }
        objPub.publish(objs);

        ros::spinOnce();
        loop_rate.sleep();
    }

    // cv::destroyAllWindows();

    close_camera();

    return 0;
}
