/* Copyright (c) 2022-2023 Qualcomm Technologies, Inc.
 * All Rights Reserved.
 * Confidential and Proprietary - Qualcomm Technologies, Inc.
 */

#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <inttypes.h>
#include <thread>
#include <functional>
#include <iostream>
#include <fstream>

#include <algorithm>

#include "common.h"
#include "tracker.h"
#include "rclcpp/rclcpp.hpp"

#include "benchmark.h"
#include "cpu.h"
#include<sys/time.h>

using namespace cv;
using namespace std;

// Logistic sigmoid: maps any real input into the open interval (0, 1).
float Sigmoid(float x)
{
    const float e = exp(-x);
    return 1.0f / (1.0f + e);
}

// Hyperbolic tangent via the logistic identity:
// tanh(x) = 2*sigmoid(2x) - 1 = 2 / (1 + e^(-2x)) - 1.
float Tanh(float x)
{
    const float denom = 1.0f + exp(-2 * x);
    return 2.0f / denom - 1;
}

// Area of overlap between two axis-aligned boxes; 0 when they are disjoint.
float IntersectionArea(const TargetBox &a, const TargetBox &b)
{
    const bool disjoint =
        (a.x1 > b.x2) || (a.x2 < b.x1) || (a.y1 > b.y2) || (a.y2 < b.y1);
    if (disjoint)
    {
        return 0.f;
    }

    const float overlap_w = std::min(a.x2, b.x2) - std::max(a.x1, b.x1);
    const float overlap_h = std::min(a.y2, b.y2) - std::max(a.y1, b.y1);
    return overlap_w * overlap_h;
}

bool scoreSort(TargetBox a, TargetBox b)
{
    return (a.score > b.score);
}

//NMS handle
int nmsHandle(std::vector<TargetBox> &src_boxes, std::vector<TargetBox> &dst_boxes, int rows, int cols)
{
    std::vector<int> picked;

    sort(src_boxes.begin(), src_boxes.end(), scoreSort);

    for (int i = 0; i < src_boxes.size(); i++)
    {
        int keep = 1;
        for (int j = 0; j < picked.size(); j++)
        {
            //intersection
            float inter_area = IntersectionArea(src_boxes[i], src_boxes[picked[j]]);
            //union
            float union_area = src_boxes[i].area() + src_boxes[picked[j]].area() - inter_area;
            float IoU = inter_area / union_area;

            if(IoU > 0.45 && src_boxes[i].category == src_boxes[picked[j]].category)
            {
                keep = 0;
                break;
            }
        }

        if (keep) {
            picked.push_back(i);
        }
    }

    for (int i = 0; i < picked.size(); i++)
    {
        src_boxes[picked[i]].x1 = src_boxes[picked[i]].x1 > 0 ? src_boxes[picked[i]].x1 : 0;
        src_boxes[picked[i]].y1 = src_boxes[picked[i]].y1 > 0 ? src_boxes[picked[i]].y1 : 0;
        src_boxes[picked[i]].x2 = src_boxes[picked[i]].x2 >= src_boxes[picked[i]].x1 ? src_boxes[picked[i]].x2 : cols - 2;
        src_boxes[picked[i]].y2 = src_boxes[picked[i]].y2 >= src_boxes[picked[i]].y1 ? src_boxes[picked[i]].y2 : rows - 2;
        dst_boxes.push_back(src_boxes[picked[i]]);
    }

    return 0;
}

int maxIoU_Handle(std::vector<TargetBox> &src_boxes, TargetBox &target_boxes, float &maxIoU)
{
    float maxArea = 0;
    int index = 0;

    for (int i = 0; i < src_boxes.size(); i++)
    {
        //intersection
        float inter_area = IntersectionArea(src_boxes[i], target_boxes);
        //union
        // float union_area = src_boxes[i].area() + target_boxes.area() - inter_area;
        // float IoU = inter_area / union_area;
        float IoU = inter_area / target_boxes.area();

        if(IoU > maxArea)
        {
            maxArea = IoU;
            index = i;
        }
    }

    maxIoU = maxArea;

    return index;
}

/* MyTracker class */
// Default constructor: selects the compile-time default tracking mode.
// No model or camera initialization happens here.
MyTracker::MyTracker()
{
   tracking_type = TRACKING_TYPE_DEFAULT;
}

// Constructs a tracker for the given mode.
//   type             - tracking mode (e.g. TRACKING_TYPE_CV, TRACKING_TYPE_LIDAR)
//   model_param_file - path to the ncnn .param file
//   model_bin_file   - path to the ncnn .bin weights file
//   label_file       - path to the newline-separated class-label list
// For CV (camera) mode the ncnn model and camera are initialized immediately;
// initCvTracker() calls exit(-1) on any load failure.
MyTracker::MyTracker(int type, std::string model_param_file, std::string model_bin_file, std::string label_file):tracking_type(type),
                   model_param_file(model_param_file), model_bin_file(model_bin_file), label_file(label_file)
{
    if(type == TRACKING_TYPE_CV)
	{
        initCvTracker();
	}
}

// Destructor: no owned resources to release here; the camera is stopped
// explicitly via stopTracking().
MyTracker::~MyTracker()
{
}

// Loads the ncnn detection model and class-label list, pins inference to a
// single CPU core, and starts the camera. Exits the process (-1) on any
// load/read failure since the tracker cannot run without them.
void MyTracker::initCvTracker(void)
{
    // Single-threaded inference pinned to core 7 for predictable latency.
    net.opt.num_threads = 1;
    ncnn::CpuSet mask;
    mask.enable(7);
    ncnn::set_cpu_thread_affinity(mask);

    if (net.load_param(model_param_file.c_str()))
    {
        cout << "Failed to load the model param file." << std::endl;
        exit(-1);
    }

    if (net.load_model(model_bin_file.c_str()))
    {
        cout << "Failed to load the model bin file." << std::endl;
        exit(-1);
    }

    input_width = NCNN_MODEL_INPUT_WIDTH;
    input_height = NCNN_MODEL_INPUT_HEIGTH;
    thresh = NCNN_MODEL_THRESH;

    // One label per line; every label index is eligible for detection.
    std::ifstream file(label_file);
    if (!file) {
        std::cerr << "Failed to read " << label_file << "." << std::endl;
        exit(-1);
    }

    std::string line;
    int index = 0;
    while (std::getline(file, line)) {
        labels.push_back(line);
        labelsIndexToDetect.push_back(index);
        index++;
    }

    if (labelsIndexToDetect.empty()) {
        std::cerr << "no matching labels to detect." << std::endl;
        exit(-1);
    }
    for (const auto &i : labelsIndexToDetect) {
        // Log message fixed: "lable" -> "label", added missing space before "to".
        std::cout << "index of label " << labels[i] << " to detect is: " << i << std::endl;
    }

    /* start camera */
    cam.start();
}

// Acquires the camera mutex guarding access to the shared frame/depth data.
// Blocks until the lock is available; pair every call with camUnlock().
void MyTracker::camLock()
{
    camMutex.lock();
}

// Releases the camera mutex acquired with camLock().
void MyTracker::camUnlock()
{
    camMutex.unlock();
}

// Frame-capture loop intended to run on a dedicated thread: pins itself to
// CPU core 5, then continuously pulls color+depth images from the camera
// into the shared frame/depth members until `running` is cleared.
// NOTE(review): camUnlock() is called each iteration without a matching
// camLock() in this function — presumably a consumer locks camMutex and this
// thread releases it once a fresh frame is in place; confirm the intended
// locking protocol, as unlocking an unowned mutex is undefined behavior.
void MyTracker::getImage_thread(bool &running) {
    /* run this on the super core */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(5, &mask);
    sched_setaffinity(0, sizeof(mask), &mask); //first param 0 means this PID

    // Give the camera pipeline a moment to warm up before the first grab.
    sleep(1);
    while(running) {
        cam.getImage(frame, depth);
        camUnlock();
    }

}

// Grabs a single color+depth image pair from the camera into the shared
// frame/depth members. No locking is performed here; callers coordinate
// access via camLock()/camUnlock() if another thread is capturing.
void MyTracker::getImageOnce(void)
{
    cam.getImage(frame, depth);
}

// Runs one CV detection pass over the most recently captured frame and
// fills target_boxes with the results.
// Returns true when at least one target was detected.
bool MyTracker::trackingOnce(std::vector<TargetBox> &target_boxes)
{
    return cvObjectDetection(frame, target_boxes);
}

// Estimates the horizontal (ground-projected) distance to the target in
// bbox. Samples depth at two pixels on the box's vertical center line
// (1/8 and 7/8 of the box height), deprojects both to 3D, and solves the
// triangle formed by the camera and the two points for the horizontal range.
// NOTE(review): if a depth sample is invalid (RealSense returns 0 m) or the
// two deprojected points coincide, the sqrt argument can go negative and
// dist_points_pow2 can be zero, producing NaN — confirm callers tolerate
// this (getTargetPose passes the result straight through).
void MyTracker::getTargetDistance(cv::Rect &bbox, float &horizon_dist_to_target)
{
    /* update last_box_center here */
    int box_center_x = bbox.width/2 + bbox.x;
    int box_center_y = bbox.height/2 + bbox.y;
    
    rs2::depth_frame depth_frame = cam.frames.get_depth_frame();
    float dist_to_center = depth_frame.get_distance(box_center_x, box_center_y);

    float upixel[2]; // From pixel
    float upoint[3]; // From point (in 3D)

    float vpixel[2]; // To pixel
    float vpoint[3]; // To point (in 3D)

    // Copy pixels into the arrays (to match rsutil signatures)
    upixel[0] = box_center_x;
    upixel[1] = bbox.y + bbox.height / 8;
    vpixel[0] = box_center_x;
    vpixel[1] = bbox.y + bbox.height * 7 / 8;

    // Query the frame for distance
    // Note: this can be optimized
    // It is not recommended to issue an API call for each pixel
    // (since the compiler can't inline these)
    // However, in this example it is not one of the bottlenecks
    auto udist = depth_frame.get_distance(static_cast<int>(upixel[0]), static_cast<int>(upixel[1]));
    auto vdist = depth_frame.get_distance(static_cast<int>(vpixel[0]), static_cast<int>(vpixel[1]));

    float udist_pow2 = pow(udist, 2.f);
    float vdist_pow2 = pow(vdist, 2.f);

    // Deproject from pixel to point in 3D
    rs2_intrinsics intr =depth_frame.get_profile().as<rs2::video_stream_profile>().get_intrinsics(); // Calibration data
    rs2_deproject_pixel_to_point(upoint, &intr, upixel, udist);
    rs2_deproject_pixel_to_point(vpoint, &intr, vpixel, vdist);

    // Calculate euclidean distance between the two points
    float dist_points_pow2 = pow(upoint[0] - vpoint[0], 2.f) + pow(upoint[1] - vpoint[1], 2.f) + pow(upoint[2] - vpoint[2], 2.f);
    // Triangle geometry: drop a perpendicular from the camera onto the
    // segment joining the two 3D points and take its length.
    horizon_dist_to_target = sqrt(vdist_pow2 - pow((udist_pow2 - vdist_pow2 - dist_points_pow2), 2.f) / 4 / dist_points_pow2);

    // std::cout << ">>> len of two point is: " << sqrt(dist_points_pow2) << " m" << std::endl;
}

// Computes the target's pose relative to the robot.
//   box   - detection bounding box (only used in camera mode)
//   theta - out: bearing angle in radians, range [-pi, pi)
//   d     - out: distance in meters
// Lidar mode reports the cached minimum-distance reading; camera mode
// derives the bearing from the box center column and the range from the
// depth image (see getTargetDistance).
void MyTracker::getTargetPose(const TargetBox &box, float &theta, float &d)
{
    if(tracking_type == TRACKING_TYPE_LIDAR)
    {
        theta = minDisAngle;
        d = minDis;
    }
    else
    {
        /* calc theta and distance */
        int box_center_x = box.x1 + (box.x2 - box.x1) / 2;
        theta = cam.calcTheta(box_center_x); //range [-pi, pi)
#if 0
        // Disabled outlier rejection: drop bearing jumps larger than 45 deg.
        if(abs(theta - lastTheta) / 3.1415 * 180 > 45) {
            std::cout << ">>> theta is: " << theta << ", lastTheta is: " << lastTheta << std::endl;
            theta = lastTheta;
        }
        lastTheta = theta;
#endif

        float horizon_dist_to_target = 0;
        cv::Rect target_rect(box.x1, box.y1, box.x2 - box.x1, box.y2 - box.y1);
        getTargetDistance(target_rect, horizon_dist_to_target);
        d = horizon_dist_to_target;

        RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "image tracking [theta,d] = [%0.3f rad, %0.3f m]", theta, horizon_dist_to_target);
    }
}

// Publishes pose information for every detected target on the ROS topic.
// Message layout: msg.x = category id, msg.y = bearing (rad), msg.z = range (m).
void MyTracker::publishTargetInfo(const std::vector<TargetBox>& target_boxes)
{
    for (const auto &box : target_boxes)
    {
        // Convert the bounding box into an angle/distance pair.
        float theta, d;
        getTargetPose(box, theta, d);

        // Console trace of what is about to be published.
        printf("Detected target: %d at [x1: %d, y1: %d] to [x2: %d, y2: %d] with angle: %.3f radians and distance: %.3f meters\n",
               box.category, box.x1, box.y1, box.x2, box.y2, theta, d);

        // Pack the pose into the outgoing message and publish it.
        msg.x = box.category;
        msg.y = theta;
        msg.z = d;
        target_info_pub->publish(msg);
    }
}

// Shuts down the active sensor. Lidar mode needs no teardown here;
// camera mode stops the capture pipeline.
void MyTracker::stopTracking(void)
{
    if (tracking_type != TRACKING_TYPE_LIDAR)
    {
        cam.stop();
    }
}

// Runs the ncnn detector on image m and fills target_boxes with the
// NMS-filtered detections (coordinates in original-frame pixels).
// Returns true when at least one object was detected.
bool MyTracker::cvObjectDetection(const cv::Mat &m, std::vector<TargetBox> &target_boxes)
{
    // Work on a private copy so the capture thread can keep writing m.
    cv::Mat frame;
    cam.frameLock();
    m.copyTo(frame);
    cam.frameUnlock();

    int frame_width = frame.cols;
    int frame_height = frame.rows;

    // Resize to the network input size and normalize pixels to [0, 1].
    ncnn::Mat input = ncnn::Mat::from_pixels_resize(frame.data, ncnn::Mat::PIXEL_BGR,\
                                                    frame.cols, frame.rows, input_width, input_height);
    const float mean_vals[3] = {0.f, 0.f, 0.f};
    const float norm_vals[3] = {1/255.f, 1/255.f, 1/255.f};
    input.substract_mean_normalize(mean_vals, norm_vals);

    double start = ncnn::get_current_time();

    // Create extractor, feed the input tensor, and run inference.
    ncnn::Extractor ex = net.create_extractor();
    ex.input(NCNN_INPUT_NAME, input);

    ncnn::Mat output;
    ex.extract(NCNN_OUTPUT_NAME, output);

    // Decode the output tensor. Per grid cell (h, w) the channel layout is:
    //   0: objectness, 1-2: x/y center offsets, 3-4: box width/height,
    //   5+: per-class scores.
    std::vector<TargetBox> output_boxes;
    const int plane = output.h * output.w;  // elements per channel (hoisted)

    for (int h = 0; h < output.h; h++)
    {
        for (int w = 0; w < output.w; w++)
        {
            const int cell = h * output.w + w;

            // Objectness (foreground) probability.
            float obj_score = output[0 * plane + cell];

            // Best class among the labels selected for detection.
            // Fixed: category was previously read uninitialized when no
            // class score exceeded 0; initialize to a sentinel.
            int category = -1;
            float max_score = 0.0f;
            for (size_t i = 0; i < labelsIndexToDetect.size(); i++)
            {
                float cls_score = output[(5 + labelsIndexToDetect[i]) * plane + cell];
                if (cls_score > max_score)
                {
                    max_score = cls_score;
                    category = labelsIndexToDetect[i];
                }
            }
            // Combined confidence: weighted blend of class and objectness.
            float score = pow(max_score, 0.4) * pow(obj_score, 0.6);

            // Threshold filtering.
            if (score > thresh)
            {
                // Decode box geometry relative to this grid cell.
                float x_offset = Tanh(output[1 * plane + cell]);
                float y_offset = Tanh(output[2 * plane + cell]);
                float box_width = Sigmoid(output[3 * plane + cell]);
                float box_height = Sigmoid(output[4 * plane + cell]);

                float cx = (w + x_offset) / output.w;
                float cy = (h + y_offset) / output.h;

                // Scale normalized center/size back to frame pixels.
                int x1 = (int)((cx - box_width * 0.5) * frame_width);
                int y1 = (int)((cy - box_height * 0.5) * frame_height);
                int x2 = (int)((cx + box_width * 0.5) * frame_width);
                int y2 = (int)((cy + box_height * 0.5) * frame_height);

                output_boxes.push_back(TargetBox{x1, y1, x2, y2, category, score});
            }
        }
    }

    // Non-maximum suppression, then hand the survivors back to the caller.
    std::vector<TargetBox> nms_boxes;
    nmsHandle(output_boxes, nms_boxes, frame.rows, frame.cols);
    target_boxes = nms_boxes;

    // Inference timing (print disabled; kept for profiling).
    double end = ncnn::get_current_time();
    double time = end - start;
    (void)time;
    // printf("Time:%7.2f ms\n",time);

    if (nms_boxes.empty())
    {
        std::cout << "No objects detected." << std::endl;
        return false;
    }

    // Draw the surviving boxes on a copy of the frame (debug visualization;
    // the result image is currently not displayed or saved anywhere).
    cv::Mat result_frame;
    frame.copyTo(result_frame);
    for (size_t i = 0; i < nms_boxes.size(); i++)
    {
        TargetBox box = nms_boxes[i];
        cv::rectangle(result_frame, cv::Point(box.x1, box.y1), cv::Point(box.x2, box.y2), cv::Scalar(0, 0, 255), 2);
    }

    // Log every detection that survived NMS.
    for (size_t i = 0; i < nms_boxes.size(); i++)
    {
        std::cout << "Detected object: " << nms_boxes[i].category << " at ["
                  << nms_boxes[i].x1 << "," << nms_boxes[i].y1 << "] to ["
                  << nms_boxes[i].x2 << "," << nms_boxes[i].y2 << "] with score: "
                  << nms_boxes[i].score << std::endl;
    }

    return true;
}