// #include "buffers.h"
// #include "common.h"
// #include "logger.h"
// #include "argsParser.h"
#include "NvInfer.h"
#include "NvInferPlugin.h"
#include "ToolFunction.hpp"
#include "time_tool.h"
#include <yaml-cpp/yaml.h>

#include <ros/ros.h>
#include <opencv2/opencv.hpp>
#include <cv_bridge/cv_bridge.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/CompressedImage.h>
#include <perception_fuse_msgs/detected_object.h>
#include <perception_fuse_msgs/detected_object_array.h>

#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/sem.h>

// Publishers, advertised in main() and used from the image callback.
ros::Publisher yolov5_trt_rcs;       // annotated visualization image
ros::Publisher detection_publisher;  // added publisher for the detection-result array

using namespace std;
using namespace nvinfer1;
// TensorRT handles, created once in main() before the subscriber starts.
IRuntime *runtime;
ICudaEngine *re_engine;
IExecutionContext *backbone_context;  // used by the callback for executeV2()


// Minimal TensorRT logger. Messages at kWARNING severity or worse are
// printed; kINFO/kVERBOSE chatter is suppressed.
// NOTE: the original compared with '<', which silently dropped warnings
// (only kERROR/kINTERNAL_ERROR got through); TensorRT's own samples log
// everything up to and including kWARNING.
class Logger : public ILogger {
    void log(Severity severity, const char *msg) noexcept override {
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
} logger;


/**
 * @brief Compressed-image callback: runs YOLOv5 TensorRT inference on one
 *        frame, then publishes an annotated image plus a detection array.
 *
 * Pipeline: decode -> letterbox to 640x640 -> H2D copy -> executeV2 ->
 * D2H copy -> NMS -> draw boxes/labels -> publish.
 *
 * Device buffers are owned by a small RAII wrapper so they are released on
 * every exit path. (The original freed them manually on each error branch
 * and leaked both if anything threw between cudaMalloc and the final
 * cudaFree calls — e.g. from the OpenCV drawing code inside the try block.)
 *
 * @param msg incoming compressed image; its header is copied onto the
 *            published detection array so downstream fusion can match stamps.
 */
void onImageCallback_img1(const sensor_msgs::CompressedImageConstPtr &msg)
{
    std::cout << "into callback-----------" << std::endl;
    try {
        const int64_t start_time = TimeTool::Now2Ms();

        // --- Decode to BGR8 ---
        cv_bridge::CvImagePtr cv_ptr;
        try {
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
        } catch (cv_bridge::Exception &e) {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
        cv::Mat image = cv_ptr->image;

        // --- Preprocess: letterbox to the network input size, gray(114) padding ---
        std::vector<float> pad = {114, 114, 114};
        std::vector<float> result = letterbox(image, 640, 640, pad);

        // Network tensor sizes: 1x3x640x640 input, 25200x6 raw predictions.
        constexpr size_t kInputElems = 1 * 3 * 640 * 640;
        constexpr size_t kOutputElems = 25200 * 6;

        std::unique_ptr<float[]> out_pred(new float[kOutputElems]);

        // RAII owner for one device allocation: cudaFree on scope exit,
        // so every early return (and any exception) releases GPU memory.
        struct DeviceBuffer {
            void *ptr = nullptr;
            ~DeviceBuffer() { if (ptr) cudaFree(ptr); }
        };
        DeviceBuffer d_input, d_output;

        cudaError_t cuda_status = cudaMalloc(&d_input.ptr, kInputElems * sizeof(float));
        if (cuda_status != cudaSuccess) {
            ROS_ERROR("CUDA memory allocation failed: %s", cudaGetErrorString(cuda_status));
            return;
        }
        cuda_status = cudaMalloc(&d_output.ptr, kOutputElems * sizeof(float));
        if (cuda_status != cudaSuccess) {
            ROS_ERROR("CUDA memory allocation failed: %s", cudaGetErrorString(cuda_status));
            return;
        }

        // --- Host-to-device transfer (default stream) ---
        cuda_status = cudaMemcpyAsync(d_input.ptr, result.data(),
                                      kInputElems * sizeof(float),
                                      cudaMemcpyHostToDevice, nullptr);
        if (cuda_status != cudaSuccess) {
            ROS_ERROR("CUDA memory copy failed: %s", cudaGetErrorString(cuda_status));
            return;
        }

        // --- Inference (executeV2 is synchronous on the default stream) ---
        void *buffers[2] = {d_input.ptr, d_output.ptr};
        if (!backbone_context->executeV2(buffers)) {
            ROS_ERROR("Inference execution failed");
            return;
        }

        // --- Device-to-host transfer; synchronize before reading out_pred ---
        cuda_status = cudaMemcpyAsync(out_pred.get(), d_output.ptr,
                                      kOutputElems * sizeof(float),
                                      cudaMemcpyDeviceToHost, nullptr);
        if (cuda_status != cudaSuccess) {
            ROS_ERROR("CUDA memory copy failed: %s", cudaGetErrorString(cuda_status));
            return;
        }
        cudaDeviceSynchronize();

        // --- Postprocess: NMS (IoU threshold 0.4, confidence threshold 0.25) ---
        std::vector<Detection> out = nms(out_pred.get(), pad, 0.4, 0.25);
        std::cout << "Detection size: " << out.size() << std::endl;

        // Class palette; index order must match the model's label order.
        const std::vector<std::string> class_names = {"car", "truck", "pedestrian", "traffic_cone", "forklift", "tractor", "flatbed", "trailer", "unknown"};
        const std::vector<cv::Scalar> colors = {
            cv::Scalar(255, 255, 255),  // car - white
            cv::Scalar(0, 255, 0),      // truck - green
            cv::Scalar(0, 0, 255),      // pedestrian - red
            cv::Scalar(0, 255, 255),    // traffic_cone - yellow
            cv::Scalar(255, 0, 0),      // forklift - blue
            cv::Scalar(255, 0, 255),    // tractor - purple
            cv::Scalar(255, 128, 0),    // flatbed - orange
            cv::Scalar(128, 0, 255),    // trailer - pink
            cv::Scalar(0, 128, 255),    // unknown - light blue
        };

        // Label rendering parameters.
        const int font_face = cv::FONT_HERSHEY_SIMPLEX;
        const double font_scale = 0.5;
        const int thickness = 1;

        // Detection array mirrors the incoming header so stamps stay aligned.
        auto detected_objects = boost::make_shared<perception_fuse_msgs::detected_object_array>();
        detected_objects->header = msg->header;

        for (const auto &detection : out) {
            const int x1 = static_cast<int>(detection.bbox[0]);
            const int y1 = static_cast<int>(detection.bbox[1]);
            const int x2 = static_cast<int>(detection.bbox[2]);
            const int y2 = static_cast<int>(detection.bbox[3]);

            // class_id arrives as float; round to nearest integer.
            const int class_id = static_cast<int>(detection.class_id + 0.5);

            // Clamp unknown ids to a safe fallback index.
            const int safe_class_id =
                (class_id >= 0 && class_id < static_cast<int>(class_names.size())) ? class_id : 3;
            const cv::Scalar color = colors[safe_class_id];
            const std::string &class_string = class_names[safe_class_id];

            // Bounding box.
            cv::rectangle(image, cv::Point(x1, y1), cv::Point(x2, y2), color, thickness);

            // Label text: "<class> NN.NN%".
            std::stringstream ss;
            ss << class_string << " " << std::fixed << std::setprecision(2)
               << (detection.conf * 100) << "%";
            const std::string label = ss.str();

            int baseline = 0;
            cv::Size label_size = cv::getTextSize(label, font_face, font_scale, thickness, &baseline);

            // Filled background behind the label, then the label itself.
            cv::rectangle(image,
                          cv::Point(x1, y1 - label_size.height - 5),
                          cv::Point(x1 + label_size.width, y1),
                          color, -1);
            cv::putText(image, label,
                        cv::Point(x1, y1 - 5),
                        font_face, font_scale, cv::Scalar(0, 0, 0), thickness);

            // Corresponding detection message entry (pixel coordinates).
            perception_fuse_msgs::detected_object obj;
            obj.score = detection.conf;
            obj.label = class_string;
            obj.x = x1;            // top-left x
            obj.y = y1;            // top-left y
            obj.width = x2 - x1;
            obj.height = y2 - y1;
            detected_objects->objects.push_back(obj);
        }

        // --- Publish detection array ---
        std::cout << "detected_objects->objects size  " << detected_objects->objects.size() << std::endl;
        detection_publisher.publish(detected_objects);

        // --- Publish annotated image ---
        cv_bridge::CvImage output_msg;
        output_msg.header.stamp = ros::Time::now();
        output_msg.encoding = sensor_msgs::image_encodings::BGR8;
        output_msg.image = image;
        yolov5_trt_rcs.publish(output_msg.toImageMsg());

        const int64_t end_time = TimeTool::Now2Ms();
        std::cout << "infer time : " << end_time - start_time << "ms" << std::endl;

    } catch (const std::exception &e) {
        ROS_ERROR("Exception in callback: %s", e.what());
    }
}

int main(int argc, char *argv[])
{
    ros::init(argc, argv, "yolov5");
    ros::NodeHandle nh_;
    
    std::string  model_dir ,image_input_topic,detection_output_topic,yolov5_results_output_topic;
    nh_.param<std::string>("/yolo/model_dir", model_dir, "default");
    nh_.param<std::string>("/yolo/image_channel", image_input_topic, "default");
    nh_.param<std::string>("/yolo/result_channel", detection_output_topic, "default");
    nh_.param<std::string>("/yolo/visual_channel", yolov5_results_output_topic, "default");

    runtime = createInferRuntime(logger);
    std::ifstream fin(model_dir);
    std::string cached_engine = "";
    while (fin.peek() != EOF)
    {
        std::stringstream buffer;
        buffer << fin.rdbuf();
        cached_engine.append(buffer.str());
    }
    fin.close();
    ICudaEngine *re_engine = runtime->deserializeCudaEngine(cached_engine.data(), cached_engine.size(), nullptr);
    backbone_context = re_engine->createExecutionContext();

    ROS_INFO("image_input_topic: %s", image_input_topic.c_str());
    ROS_INFO("yolov5_results_output_topic: %s", yolov5_results_output_topic.c_str());


    ros::Subscriber sub;
    yolov5_trt_rcs = nh_.advertise<sensor_msgs::Image>(yolov5_results_output_topic, 1);
    detection_publisher = nh_.advertise<perception_fuse_msgs::detected_object_array>(detection_output_topic, 1);

    sub = nh_.subscribe<sensor_msgs::CompressedImage>(image_input_topic, 1, &onImageCallback_img1);

    ROS_INFO("init end");
    ros::spin();
    return 0;
}
