#include <iostream>
#include <string>
#include <vector>
#include <map>
#include <chrono>
#include <sstream>

#include <opencv2/opencv.hpp>
#include <Eigen/Dense>

#include "BYTETracker.h"
#include "opencv_utils.h"
#include "colormap_parser.h"

#include "det.h"
#include "det_process.h"
#include "image_utils.h"
#include "file_utils.h"
#include "image_drawing.h"

#include <fstream>
#include <nlohmann/json.hpp>
using namespace std;


// Load a class-label JSON file.
//   label_path : path to the JSON file (e.g. {"0": "person", "1": "head", ...}).
//   class_num  : out — number of top-level entries in the JSON document
//                (0 if the file could not be opened).
//   label_json : out — the parsed JSON document (left empty on failure).
void read_json(string label_path,int &class_num,nlohmann::json &label_json){
    ifstream json_file(label_path);
    // BUGFIX: the original streamed from an unchecked file handle; a missing
    // file left class_num uninitialized and label_json empty with no warning.
    if (!json_file.is_open()) {
        printf("read_json fail! cannot open label file: %s\n", label_path.c_str());
        class_num = 0;
        return;
    }
    json_file >> label_json;
    class_num = static_cast<int>(label_json.size());
    // No explicit close(): the ifstream destructor closes the file (RAII).
}

// Copy an OpenCV frame into an image_buffer_t for RKNN inference.
//
// The frame is converted in place from BGR to RGB when it has 3 channels
// (OpenCV decodes video as BGR; the model expects RGB888).
//
// Ownership: the pixel buffer is allocated with malloc(); the CALLER must
// release it with free(image->virt_addr) when done (main() already does).
//
// Returns 0 on success, -1 on failure.
static int read_image_from_frame(cv::Mat& frame, image_buffer_t* image)
{
    if (frame.empty()) {
        printf("Input frame is empty\n");
        return -1;
    }

    int width = frame.cols;
    int height = frame.rows;
    int channels = frame.channels();

    printf("Input image: %d x %d x %d\n", width, height, channels);

    // Assume the input frame is BGR and convert it to RGB for the model.
    if (channels == 3) {
        cv::cvtColor(frame, frame, cv::COLOR_BGR2RGB);
    }

    // A flat memcpy is only valid for a continuous Mat; ROI views or some
    // decoder outputs have row padding, so normalize first.
    if (!frame.isContinuous()) {
        frame = frame.clone();
    }

    int image_size = width * height * channels;

    // BUGFIX: the original copied into a local std::vector and stored
    // vector.data() in image->virt_addr — the buffer was destroyed when this
    // function returned, leaving the caller with a dangling pointer that it
    // then passed to free() (undefined behavior twice over). Allocate with
    // malloc() so the pointer stays valid and the caller's free() is legal.
    unsigned char *buffer = (unsigned char *)malloc(image_size);
    if (buffer == NULL) {
        printf("Failed to allocate %d bytes for image buffer\n", image_size);
        return -1;
    }
    memcpy(buffer, frame.data, image_size);

    // Fill in the image_buffer_t descriptor.
    image->width = width;
    image->height = height;
    image->format = IMAGE_FORMAT_RGB888; // adjust if the model expects another layout
    image->virt_addr = buffer;
    image->size = image_size;

    return 0;
}


// Entry point: runs YOLOv8 person/head detection (RKNN) over an RTSP stream,
// feeds the detections to a ByteTracker, and displays annotated frames.
int main(){

    const char *model_path = "../model/det/person_head/det_head_person_960.rknn";
    const char *video_path = "rtsp://192.168.10.105/av_stream/0";
    // const char *video_path = "/mnt/nfs_share/rk_c++/test/person.mp4";
    string label_path = "../model/det/person_head/label.json";
    std::string colormap_filepath = "../model/map.json";
    // Default NMS threshold
    float nms_thresh = 0.25;
    // Default confidence threshold
    float box_thresh = 0.25;

    // Initialize a ByteTracker object
    // (track thresh, buffer frames, match thresh, frame rate)
    BYTETracker tracker(0.25, 30, 0.65, 30);

    int class_num = 0;
    nlohmann::json label_json;
    read_json(label_path, class_num, label_json);

    int ret;
    // Initialize the detection model.
    rknn_app_context_t rknn_app_ctx;
    memset(&rknn_app_ctx, 0, sizeof(rknn_app_context_t));

    ret = init_yolov8_model(model_path, &rknn_app_ctx);
    if (ret != 0)
    {
        printf("init_yolov8_model fail! ret=%d model_path=%s\n", ret, model_path);
        ret = release_yolov8_model(&rknn_app_ctx);
        if (ret != 0)
        {
            printf("release_yolov8_model fail! ret=%d\n", ret);
        }
        // BUGFIX: the original fell through and kept running with a released
        // (unusable) model context; abort instead.
        return -1;
    }

    // std::cout << cv::getBuildInformation() << std::endl;

    // Initialize VideoCapture
    cv::VideoCapture cap(video_path);

    if (!cap.isOpened()) {
        std::cout << "Error: Could not open the video file." << std::endl;
        release_yolov8_model(&rknn_app_ctx);
        return -1;
    }
    double frame_width = cap.get(cv::CAP_PROP_FRAME_WIDTH);   // Get the width of the video
    double frame_height = cap.get(cv::CAP_PROP_FRAME_HEIGHT); // Get the height of the video
    double fps = cap.get(cv::CAP_PROP_FPS);  // Get the FPS of the input video
    double total_frames = cap.get(cv::CAP_PROP_FRAME_COUNT);
    std::cout << std::endl;
    std::cout << "Video Info:" << std::endl;
    std::cout << "Frame Width: " << frame_width << std::endl;
    std::cout << "Frame Height: " << frame_height << std::endl;
    std::cout << "FPS: " << fps << std::endl;
    std::cout << "Frames: " << total_frames << std::endl;

    cv::Mat frame;
    image_buffer_t src_image;
    memset(&src_image, 0, sizeof(image_buffer_t));

    // Load the JSON colormap (class name -> BGR color for drawing).
    std::map<std::string, std::vector<int>> colormap_dict;
    std::vector<std::string> class_names;
    std::vector<std::vector<int>> int_colors;
    parse_colormap(colormap_filepath, colormap_dict, class_names, int_colors);

    // Inference input size — must match the model (det_head_person_960).
    cv::Mat resized_img;
    cv::Size size(960, 960);

    // Create the display window once, outside the frame loop.
    // BUGFIX: the original called cv::namedWindow on every iteration.
    cv::namedWindow("Image Display", cv::WINDOW_AUTOSIZE);

    while (true) {
        // Grab the next frame; on end-of-stream release everything and stop.
        if (!cap.read(frame)) {
            std::cerr << "Video has ended or failed" << std::endl;
            ret = release_yolov8_model(&rknn_app_ctx);
            if (ret != 0)
            {
                printf("release_yolov8_model fail! ret=%d\n", ret);
            }
            if (src_image.virt_addr != NULL)
            {
                free(src_image.virt_addr);
                src_image.virt_addr = NULL;
            }
            break;
        }
        if (frame.empty()) {
            break;
        }
        cv::resize(frame, resized_img, size);

        // BUGFIX: the original declared a new `int ret` here (shadowing the
        // outer one) and never checked the result.
        ret = read_image_from_frame(resized_img, &src_image);
        if (ret != 0) {
            printf("read_image_from_frame fail! ret=%d\n", ret);
            break;
        }

        // Pre-process the image (letterbox + quantize into model inputs).
        rknn_input inputs[rknn_app_ctx.io_num.n_input];
        memset(inputs, 0, sizeof(inputs));
        letterbox_t letter_box;
        memset(&letter_box, 0, sizeof(letterbox_t));
        ret = pre_process(&rknn_app_ctx, &src_image, inputs, &letter_box);
        if (ret != 0)
        {
            printf("pre_process fail! ret=%d\n", ret);
            return -1;
        }

        // Run inference.
        rknn_output outputs[rknn_app_ctx.io_num.n_output];
        memset(outputs, 0, sizeof(outputs));
        ret = inference_yolov8_model(&rknn_app_ctx, inputs, outputs);
        if (ret != 0)
        {
            printf("inference_yolov8_model fail! ret=%d\n", ret);
            ret = release_yolov8_model(&rknn_app_ctx);
            if (ret != 0)
            {
                printf("release_yolov8_model fail! ret=%d\n", ret);
            }
            if (src_image.virt_addr != NULL)
            {
                free(src_image.virt_addr);
                src_image.virt_addr = NULL;
            }
            // BUGFIX: the original fell through into post_process with
            // invalid outputs after an inference failure.
            return -1;
        }

        // Decode raw outputs into detection boxes.
        // BUGFIX: the original had a dead `if (!&od_results)` check (the
        // address of a local is never null) and zeroed only sizeof(pointer)
        // bytes via memset(&od_results, 0, sizeof(&od_results)).
        object_detect_result_list od_results;
        memset(&od_results, 0x00, sizeof(od_results));
        post_process(&rknn_app_ctx, outputs, &letter_box, box_thresh, nms_thresh, &od_results, class_num);

        // Remember to release the rknn outputs.
        rknn_outputs_release(rknn_app_ctx.rknn_ctx, rknn_app_ctx.io_num.n_output, outputs);
        // Release the image buffer now that inference is done.
        if (src_image.virt_addr != NULL)
        {
            free(src_image.virt_addr);
            // BUGFIX: reset so the end-of-stream path cannot double-free.
            src_image.virt_addr = NULL;
        }

        // Collect detections as (x, y, w, h), class index and score matrices.
        Eigen::MatrixXf bbox_list(od_results.count, 4);
        Eigen::MatrixXf label_list(od_results.count, 1);
        Eigen::MatrixXf probs_list(od_results.count, 1);

        for (int i = 0; i < od_results.count; i++)
        {
            object_detect_result *det_result = &(od_results.results[i]);
            // BUGFIX: the original stored .c_str() of a temporary std::string
            // in a const char* — the string died before printf read it.
            std::string label_name = label_json[std::to_string(det_result->cls_id)].get<std::string>();
            printf("%s @ (%d %d %d %d) %.3f\n", label_name.c_str(),
                det_result->box.left, det_result->box.top,
                det_result->box.right, det_result->box.bottom,
                det_result->prop);
            float x1 = det_result->box.left;
            float y1 = det_result->box.top;
            float x2 = det_result->box.right;
            float y2 = det_result->box.bottom;
            float w = x2 - x1;
            float h = y2 - y1;
            int label_index = det_result->cls_id;
            float score = det_result->prop;

            bbox_list(i, 0) = x1;
            bbox_list(i, 1) = y1;
            bbox_list(i, 2) = w;
            bbox_list(i, 3) = h;
            label_list(i, 0) = label_index;
            probs_list(i, 0) = score;
        }

        // Update the tracker with this frame's detections ([x,y,w,h,score]).
        Eigen::MatrixXf boxes_probs(od_results.count, 5);
        boxes_probs << bbox_list, probs_list;
        std::vector<KalmanBBoxTrack> tracks = tracker.process_frame_detections(boxes_probs);

        // Convert (x, y, w, h) to (top-left, bottom-right) for matching.
        Eigen::MatrixXf tlbr_boxes(bbox_list.rows(), 4);
        tlbr_boxes << bbox_list.col(0),              // top-left X
            bbox_list.col(1),                        // top-left Y
            bbox_list.col(0) + bbox_list.col(2),     // bottom-right X
            bbox_list.col(1) + bbox_list.col(3);     // bottom-right Y

        std::vector<int> track_ids = match_detections_with_tracks(tlbr_boxes.cast<double>(), tracks);

        // Keep only detections that matched a track and annotate them
        // with "<track_id>-<class_name>" labels and per-class colors.
        std::vector<Eigen::VectorXf> filtered_bbox_list;
        std::vector<int> filtered_label_list;
        std::vector<float> filtered_probs_list;
        std::vector<int> filtered_track_ids;
        std::vector<std::string> labels;
        std::vector<cv::Scalar> colors;

        for (int i = 0; i < (int)track_ids.size(); ++i) {
            if (track_ids[i] != -1) {
                filtered_bbox_list.push_back(bbox_list.row(i));
                filtered_label_list.push_back(label_list(i, 0));
                filtered_probs_list.push_back(probs_list(i, 0));
                filtered_track_ids.push_back(track_ids[i]);

                std::string label = class_names[(int)label_list(i, 0)];
                auto it = std::find(class_names.begin(), class_names.end(), label);
                int index = std::distance(class_names.begin(), it);
                // int_colors stores RGB; cv::Scalar wants BGR, hence [2],[1],[0].
                cv::Scalar color(int_colors[index][2], int_colors[index][1], int_colors[index][0]);
                colors.push_back(color);

                std::ostringstream oss;
                oss << track_ids[i] << "-" << label;
                label = oss.str();
                labels.push_back(label);
            }
        }

        resized_img = draw_bboxes_opencv(resized_img, filtered_bbox_list, labels, colors, 1, 8, filtered_probs_list);

        // Display the annotated frame; waitKey(1) pumps the GUI event loop.
        cv::imshow("Image Display", resized_img);
        cv::waitKey(1);
    }

    return 0;
}