#include "co_detr_ros/tensorrt_detector.h"

// POSIX
#include <dlfcn.h>

// CUDA / TensorRT
#include <cuda_runtime_api.h>
#include <NvInfer.h>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxParser.h>

// OpenCV
#include <opencv2/opencv.hpp>

// C++ standard library
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Logger for TensorRT
// Logger for TensorRT: forwards library diagnostics to stdout, keeping only
// warnings and errors.
class Logger : public nvinfer1::ILogger {
    void log(Severity severity, const char* msg) noexcept override {
        // Severity ordering: kINTERNAL_ERROR < kERROR < kWARNING < kINFO < kVERBOSE.
        // The previous filter (severity != kINFO) still printed every kVERBOSE
        // message; compare by level so both INFO and VERBOSE are suppressed.
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
} gLogger;

// Constructs a detector bound to a serialized engine file and an optional
// custom-plugin shared library. No GPU or TensorRT work happens here; all
// heavy setup is deferred to initialize().
//   model_path  - path to the serialized TensorRT engine (.engine/.plan).
//   plugin_path - path to a plugin .so to dlopen before deserialization;
//                 may be empty to skip plugin loading.
TensorRTDetector::TensorRTDetector(const std::string& model_path, const std::string& plugin_path)
    : model_path_(model_path), plugin_path_(plugin_path), initialized_(false) {
    std::cout << "TensorRTDetector created with model: " << model_path << std::endl;
}

// Destroys the detector, releasing TensorRT objects in reverse order of
// creation: execution context first, then engine, then runtime.
// unique_ptr::reset() is a no-op on an empty pointer, so the null checks
// the original carried were redundant.
TensorRTDetector::~TensorRTDetector() {
    context_.reset();
    engine_.reset();
    runtime_.reset();
    std::cout << "TensorRTDetector destroyed" << std::endl;
}

// Loads the optional plugin library, creates the TensorRT runtime,
// deserializes the engine from model_path_, and creates an execution context.
// Returns true on success; on failure logs the reason and returns false.
// Must be called (and succeed) before any inference method.
bool TensorRTDetector::initialize() {
    std::cout << "Initializing TensorRT detector..." << std::endl;

    try {
        // Load the plugin BEFORE creating the TensorRT runtime so that the
        // plugin's REGISTER_TENSORRT_PLUGIN static registration has run by the
        // time engine deserialization resolves custom layers.
        if (!plugin_path_.empty()) {
            std::cout << "Loading plugin from: " << plugin_path_ << std::endl;
            void* handle = dlopen(plugin_path_.c_str(), RTLD_LAZY);
            if (!handle) {
                std::cerr << "Failed to load plugin: " << dlerror() << std::endl;
                std::cerr << "Continuing without plugin..." << std::endl;
            } else {
                // (The original logged this success message three times.)
                std::cout << "Plugin loaded successfully" << std::endl;

                // Also register the stock TensorRT plugins (NMS, etc.).
                initLibNvInferPlugins(&gLogger, "");

                // Intentionally never dlclose(handle): TensorRT keeps calling
                // into the library for the lifetime of the engine.
            }
        }

        // Create the TensorRT runtime AFTER plugin loading.
        runtime_ = TRTUniquePtr<nvinfer1::IRuntime>(nvinfer1::createInferRuntime(gLogger));
        if (!runtime_) {
            std::cerr << "Failed to create TensorRT runtime" << std::endl;
            return false;
        }

        // Read the whole serialized engine into memory.
        std::ifstream engine_file(model_path_, std::ios::binary);
        if (!engine_file.good()) {
            std::cerr << "Failed to open engine file: " << model_path_ << std::endl;
            return false;
        }

        engine_file.seekg(0, std::ios::end);
        size_t engine_size = engine_file.tellg();
        engine_file.seekg(0, std::ios::beg);

        if (engine_size == 0) {
            std::cerr << "Engine file is empty: " << model_path_ << std::endl;
            return false;
        }

        std::vector<char> engine_data(engine_size);
        // The original never checked that the read actually succeeded.
        if (!engine_file.read(engine_data.data(), static_cast<std::streamsize>(engine_size))) {
            std::cerr << "Failed to read engine file: " << model_path_ << std::endl;
            return false;
        }
        engine_file.close();

        // Deserialize the engine.
        engine_ = TRTUniquePtr<nvinfer1::ICudaEngine>(
            runtime_->deserializeCudaEngine(engine_data.data(), engine_size));
        if (!engine_) {
            std::cerr << "Failed to deserialize TensorRT engine" << std::endl;
            return false;
        }

        // Create the execution context.
        context_ = TRTUniquePtr<nvinfer1::IExecutionContext>(engine_->createExecutionContext());
        if (!context_) {
            std::cerr << "Failed to create execution context" << std::endl;
            return false;
        }

        // Cache tensor shapes. NOTE(review): bindings are assumed to be
        // 0 = input, 1 = bboxes output (a third labels binding is used during
        // inference) — confirm against the exported engine's binding order.
        input_dims_ = engine_->getBindingDimensions(0);
        output_dims_ = engine_->getBindingDimensions(1);

        std::cout << "✅ TensorRT engine initialized successfully" << std::endl;
        std::cout << "Input dimensions: ";
        for (int i = 0; i < input_dims_.nbDims; ++i) {
            std::cout << input_dims_.d[i] << " ";
        }
        std::cout << std::endl;

        std::cout << "Output dimensions: ";
        for (int i = 0; i < output_dims_.nbDims; ++i) {
            std::cout << output_dims_.d[i] << " ";
        }
        std::cout << std::endl;

        initialized_ = true;
        return true;

    } catch (const std::exception& e) {
        std::cerr << "Exception during initialization: " << e.what() << std::endl;
        return false;
    }
}

// Runs inference on a raw float32 tensor stored in a binary file.
// Returns the flattened bboxes output, or an empty vector on any failure.
std::vector<float> TensorRTDetector::inferenceFromFile(const std::string& input_file_path) {
    // Guard clauses: refuse to run unless initialize() succeeded.
    if (!initialized_) {
        std::cerr << "TensorRT detector not initialized" << std::endl;
        return {};
    }

    // Pull the input tensor off disk.
    std::vector<float> input_data;
    const bool loaded = loadInputData(input_file_path, input_data);
    if (!loaded) {
        std::cerr << "Failed to load input data from: " << input_file_path << std::endl;
        return {};
    }

    // Only a real engine + context can execute; otherwise report and give up.
    if (!engine_ || !context_) {
        std::cerr << "TensorRT engine or context not available" << std::endl;
        return {};
    }
    return performRealInference(input_data);
}

// Executes one synchronous inference pass on the GPU.
//   input_data - flattened (1, 3, H, W) float tensor; must match the engine's
//                input element count exactly.
// Returns the flattened bboxes output (and, as a side effect, saves a
// visualization image); returns an empty vector on any CUDA/TensorRT failure.
std::vector<float> TensorRTDetector::performRealInference(const std::vector<float>& input_data) {
    try {
        // Timing covers H2D copy + inference + D2H copy.
        auto start_time = std::chrono::high_resolution_clock::now();

        // Validate the input size before touching the GPU.
        if (input_data.size() != getInputElementCount()) {
            std::cerr << "Input size mismatch. Expected: " << getInputElementCount()
                      << ", Got: " << input_data.size() << std::endl;
            return {};
        }

        const size_t input_size = input_data.size() * sizeof(float);
        const size_t bboxes_size = getOutputElementCount() * sizeof(float);
        // NOTE(review): 300 detections is hard-coded to this exported Co-DETR
        // engine's "labels" binding — confirm against the engine dimensions.
        constexpr size_t kNumDetections = 300;
        const size_t labels_size = kNumDetections * sizeof(int32_t);

        // Bindings: [0] input, [1] bboxes (float32), [2] labels (int32).
        void* buffers[3] = {nullptr, nullptr, nullptr};
        // Single cleanup path instead of the original's six copy-pasted
        // cudaFree ladders (which were easy to get wrong when extending).
        auto free_buffers = [&buffers]() {
            for (void*& b : buffers) {
                if (b) {
                    cudaFree(b);
                    b = nullptr;
                }
            }
        };

        cudaError_t cuda_status = cudaMalloc(&buffers[0], input_size);
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to allocate input GPU memory: " << cudaGetErrorString(cuda_status) << std::endl;
            return {};
        }

        cuda_status = cudaMalloc(&buffers[1], bboxes_size);
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to allocate bboxes GPU memory: " << cudaGetErrorString(cuda_status) << std::endl;
            free_buffers();
            return {};
        }

        cuda_status = cudaMalloc(&buffers[2], labels_size);
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to allocate labels GPU memory: " << cudaGetErrorString(cuda_status) << std::endl;
            free_buffers();
            return {};
        }

        // Copy the input tensor to the GPU.
        cuda_status = cudaMemcpy(buffers[0], input_data.data(), input_size, cudaMemcpyHostToDevice);
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to copy input to GPU: " << cudaGetErrorString(cuda_status) << std::endl;
            free_buffers();
            return {};
        }

        // Ensure the copy is complete before launching inference.
        cuda_status = cudaDeviceSynchronize();
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to synchronize GPU: " << cudaGetErrorString(cuda_status) << std::endl;
            free_buffers();
            return {};
        }

        // Host-side output buffers.
        std::vector<float> bboxes_output(getOutputElementCount());
        std::vector<int32_t> labels_output(kNumDetections);

        // executeV2 runs synchronously on the default stream.
        if (!context_->executeV2(buffers)) {
            std::cerr << "Inference execution failed" << std::endl;
            free_buffers();
            return {};
        }

        // Copy outputs back. (The original returned silently on these
        // failures — now they are reported like every other CUDA error.)
        cuda_status = cudaMemcpy(bboxes_output.data(), buffers[1], bboxes_size, cudaMemcpyDeviceToHost);
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to copy bboxes from GPU: " << cudaGetErrorString(cuda_status) << std::endl;
            free_buffers();
            return {};
        }

        cuda_status = cudaMemcpy(labels_output.data(), buffers[2], labels_size, cudaMemcpyDeviceToHost);
        if (cuda_status != cudaSuccess) {
            std::cerr << "Failed to copy labels from GPU: " << cudaGetErrorString(cuda_status) << std::endl;
            free_buffers();
            return {};
        }

        free_buffers();

        // Report elapsed wall time.
        auto end_time = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
        std::cout << "Inference time: " << duration.count() << " microseconds (" 
                  << duration.count() / 1000.0 << " ms)" << std::endl;

        // Automatically save a visualization of this inference result.
        std::string output_path = "/data2/xd/Co-DETR-TensorRT/co_detr_ros_ws/src/co_detr_ros/results/inference_result_visualization.jpg";
        bool viz_success = visualizeResults(bboxes_output, output_path);
        if (viz_success) {
            std::cout << "✅ Visualization saved to: " << output_path << std::endl;
        } else {
            std::cout << "❌ Failed to save visualization" << std::endl;
        }

        return bboxes_output;

    } catch (const std::exception& e) {
        std::cerr << "Exception during real inference: " << e.what() << std::endl;
        return {};
    }
}

// Reads a raw binary blob of native-endian float32 values into `data`.
// Any trailing bytes that do not form a complete float are ignored.
// Returns true only if the file opened AND the full payload was read —
// the original returned true even when the read itself failed.
bool TensorRTDetector::loadInputData(const std::string& file_path, std::vector<float>& data) {
    std::ifstream file(file_path, std::ios::binary);
    if (!file.good()) {
        return false;
    }

    file.seekg(0, std::ios::end);
    const std::streamoff file_size = file.tellg();
    if (file_size < 0) {
        // tellg() reports failure as -1; treat it as an unreadable file.
        return false;
    }
    file.seekg(0, std::ios::beg);

    const size_t num_floats = static_cast<size_t>(file_size) / sizeof(float);
    const std::streamsize payload = static_cast<std::streamsize>(num_floats * sizeof(float));
    data.resize(num_floats);
    file.read(reinterpret_cast<char*>(data.data()), payload);

    // Succeed only if every requested byte arrived.
    return file.gcount() == payload;
}

// Total byte size of the bboxes output tensor (product of all dims * float).
size_t TensorRTDetector::getOutputSize() {
    size_t element_count = 1;
    for (int axis = output_dims_.nbDims; axis-- > 0; ) {
        element_count *= output_dims_.d[axis];
    }
    return element_count * sizeof(float);
}

// Number of float elements the input tensor holds (product of all dims).
size_t TensorRTDetector::getInputElementCount() {
    size_t total = 1;
    for (int axis = input_dims_.nbDims; axis-- > 0; ) {
        total *= input_dims_.d[axis];
    }
    return total;
}

// Number of float elements in the bboxes output tensor (product of all dims).
size_t TensorRTDetector::getOutputElementCount() {
    size_t total = 1;
    for (int axis = output_dims_.nbDims; axis-- > 0; ) {
        total *= output_dims_.d[axis];
    }
    return total;
}

// Intersection-over-Union of two axis-aligned boxes.
//   bbox format: [x1, y1, x2, y2] (corner coordinates, x2 >= x1, y2 >= y1
//   expected but not required).
// Returns a value in [0, 1]; 0 when the boxes do not overlap or when both
// boxes are degenerate (zero area).
float TensorRTDetector::calculateIoU(const std::vector<float>& bbox1, const std::vector<float>& bbox2) {
    float x1 = std::max(bbox1[0], bbox2[0]);
    float y1 = std::max(bbox1[1], bbox2[1]);
    float x2 = std::min(bbox1[2], bbox2[2]);
    float y2 = std::min(bbox1[3], bbox2[3]);

    if (x2 < x1 || y2 < y1) {
        return 0.0f;  // No overlap
    }

    float intersection = (x2 - x1) * (y2 - y1);
    float area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]);
    float area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]);
    float union_area = area1 + area2 - intersection;

    // Guard against division by zero: two degenerate (zero-area) boxes
    // previously produced NaN here.
    if (union_area <= 0.0f) {
        return 0.0f;
    }
    return intersection / union_area;
}

std::vector<float> TensorRTDetector::applyNMS(const std::vector<float>& bboxes, float conf_threshold, float iou_threshold) {
    std::vector<float> filtered_bboxes;
    std::vector<bool> keep(bboxes.size() / 5, true);
    
    // Filter by confidence threshold
    for (size_t i = 0; i < bboxes.size(); i += 5) {
        if (bboxes[i + 4] < conf_threshold) {
            keep[i / 5] = false;
        }
    }
    
    // Apply NMS
    for (size_t i = 0; i < bboxes.size(); i += 5) {
        if (!keep[i / 5]) continue;
        
        std::vector<float> bbox1 = {bboxes[i], bboxes[i + 1], bboxes[i + 2], bboxes[i + 3]};
        
        for (size_t j = i + 5; j < bboxes.size(); j += 5) {
            if (!keep[j / 5]) continue;
            
            std::vector<float> bbox2 = {bboxes[j], bboxes[j + 1], bboxes[j + 2], bboxes[j + 3]};
            
            float iou = calculateIoU(bbox1, bbox2);
            if (iou > iou_threshold) {
                // Keep the one with higher confidence
                if (bboxes[i + 4] < bboxes[j + 4]) {
                    keep[i / 5] = false;
                    break;
                } else {
                    keep[j / 5] = false;
                }
            }
        }
    }
    
    // Collect kept bboxes
    for (size_t i = 0; i < bboxes.size(); i += 5) {
        if (keep[i / 5]) {
            for (int j = 0; j < 5; ++j) {
                filtered_bboxes.push_back(bboxes[i + j]);
            }
        }
    }
    
    return filtered_bboxes;
}

// Applies NMS to the raw detections, draws the survivors on (a resized copy
// of) the reference image, adds a text overlay, and writes the result to
// output_path. Returns whether cv::imwrite succeeded.
// NOTE(review): the source image path and the 1920x1080 "original" size are
// hard-coded to one dataset sample — verify before reuse.
bool TensorRTDetector::visualizeResults(const std::vector<float>& output_data, 
                                       const std::string& output_path) {
    try {
        std::cout << "Starting visualization of " << output_data.size() << " output values..." << std::endl;
        
        // Filter detections: confidence > 0.3, IoU suppression at 0.5.
        std::vector<float> nms_results = applyNMS(output_data, 0.3f, 0.5f);
        std::cout << "After NMS: " << nms_results.size() / 5 << " detections" << std::endl;
        
        // Load the reference image to draw on.
        std::string original_image_path = "/data2/xd/Co-DETR-TensorRT/co_detr_ros_ws/src/co_detr_ros/data/scene-1_000003.jpg";
        cv::Mat original_image = cv::imread(original_image_path);
        
        if (original_image.empty()) {
            std::cout << "Original image not found, creating blank image..." << std::endl;
            // Fallback: white canvas at the model's processed resolution.
            int width = 1333;
            int height = 750;
            cv::Mat image(height, width, CV_8UC3, cv::Scalar(255, 255, 255));
            original_image = image;
        }
        
        // Resize to the model's processed resolution (1333x750).
        cv::Mat resized_image;
        cv::resize(original_image, resized_image, cv::Size(1333, 750));
        
        // Draw each surviving detection.
        if (!nms_results.empty()) {
            // Detections are in the coordinate frame of the training images
            // (assumed 1920x1080 — TODO confirm), so scale to 1333x750.
            float orig_width = 1920.0f;
            float orig_height = 1080.0f;
            float prep_width = static_cast<float>(resized_image.cols);
            float prep_height = static_cast<float>(resized_image.rows);
            
            float scale_x = prep_width / orig_width;
            float scale_y = prep_height / orig_height;
            
            std::cout << "Drawing " << nms_results.size() / 5 << " detections..." << std::endl;
            
            for (size_t i = 0; i < nms_results.size(); i += 5) {
                if (i + 4 < nms_results.size()) {
                    float x1 = nms_results[i];
                    float y1 = nms_results[i + 1];
                    float x2 = nms_results[i + 2];
                    float y2 = nms_results[i + 3];
                    float conf = nms_results[i + 4];
                    
                    // Scale into the drawn image's coordinate frame.
                    x1 *= scale_x;
                    y1 *= scale_y;
                    x2 *= scale_x;
                    y2 *= scale_y;
                    
                    // Clamp to the image bounds.
                    x1 = std::max(0.0f, std::min(prep_width, x1));
                    y1 = std::max(0.0f, std::min(prep_height, y1));
                    x2 = std::max(0.0f, std::min(prep_width, x2));
                    y2 = std::max(0.0f, std::min(prep_height, y2));
                    
                    // Skip boxes clamped down to zero area.
                    if (x2 > x1 && y2 > y1) {
                        // Green bounding box.
                        cv::rectangle(resized_image, 
                                    cv::Point(static_cast<int>(x1), static_cast<int>(y1)),
                                    cv::Point(static_cast<int>(x2), static_cast<int>(y2)),
                                    cv::Scalar(0, 255, 0), 3);
                        
                        // Confidence label on a white background above the box.
                        std::string conf_text = "Conf: " + std::to_string(conf).substr(0, 4);
                        int baseline = 0;
                        cv::Size text_size = cv::getTextSize(conf_text, cv::FONT_HERSHEY_SIMPLEX, 0.6, 2, &baseline);
                        cv::rectangle(resized_image, 
                                    cv::Point(static_cast<int>(x1), static_cast<int>(y1) - text_size.height - 10),
                                    cv::Point(static_cast<int>(x1) + text_size.width + 10, static_cast<int>(y1)),
                                    cv::Scalar(255, 255, 255), -1);
                        cv::putText(resized_image, conf_text, 
                                  cv::Point(static_cast<int>(x1) + 5, static_cast<int>(y1) - 5),
                                  cv::FONT_HERSHEY_SIMPLEX, 0.6, cv::Scalar(0, 255, 0), 2);
                        
                        // Detection index on a white background below the box.
                        std::string det_text = "Det " + std::to_string(i / 5 + 1);
                        text_size = cv::getTextSize(det_text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 2, &baseline);
                        cv::rectangle(resized_image, 
                                    cv::Point(static_cast<int>(x1), static_cast<int>(y2)),
                                    cv::Point(static_cast<int>(x1) + text_size.width + 10, static_cast<int>(y2) + text_size.height + 10),
                                    cv::Scalar(255, 255, 255), -1);
                        cv::putText(resized_image, det_text, 
                                  cv::Point(static_cast<int>(x1) + 5, static_cast<int>(y2) + text_size.height + 5),
                                  cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255, 0, 0), 2);
                    }
                }
            }
        } else {
            std::cout << "No valid detections found after NMS" << std::endl;
        }
        
        // Title / summary overlay. The IoU figure now matches the 0.5
        // threshold actually passed to applyNMS above (was wrongly "0.8").
        std::string title_text = "Co-DETR TensorRT Inference Results (NMS Applied)";
        std::string info_text = "Detections: " + std::to_string(nms_results.size() / 5) + 
                               " (Conf > 0.3, IoU > 0.5)";
        std::string model_info = "Model: Co-DETR TensorRT Engine";
        
        // Semi-transparent dark panel behind the text.
        cv::Mat overlay = resized_image.clone();
        cv::rectangle(overlay, cv::Point(10, 10), cv::Point(700, 140), cv::Scalar(0, 0, 0), -1);
        cv::addWeighted(overlay, 0.7, resized_image, 0.3, 0, resized_image);
        
        cv::putText(resized_image, title_text, 
                   cv::Point(20, 50), cv::FONT_HERSHEY_SIMPLEX, 0.8, 
                   cv::Scalar(255, 255, 255), 2);
        cv::putText(resized_image, info_text, 
                   cv::Point(20, 90), cv::FONT_HERSHEY_SIMPLEX, 0.6, 
                   cv::Scalar(255, 255, 255), 2);
        cv::putText(resized_image, model_info, 
                   cv::Point(20, 120), cv::FONT_HERSHEY_SIMPLEX, 0.5, 
                   cv::Scalar(255, 255, 255), 1);
        
        // Write the annotated image to disk.
        bool save_success = cv::imwrite(output_path, resized_image);
        if (save_success) {
            std::cout << "✅ Visualization saved successfully to: " << output_path << std::endl;
        } else {
            std::cout << "❌ Failed to save visualization to: " << output_path << std::endl;
        }
        
        return save_success;
        
    } catch (const std::exception& e) {
        std::cerr << "❌ Error during visualization: " << e.what() << std::endl;
        return false;
    }
}

// Converts an image file into the network's input tensor:
// read -> resize to 1333x750 -> float32 -> HWC-to-CHW, packed as (1, 3, H, W).
// The BGR channel order is kept deliberately (it matches input_data.bin) and
// no mean/std normalization is applied.
// Returns false (with a logged reason) on any failure.
bool TensorRTDetector::preprocessImage(const std::string& image_path, std::vector<float>& processed_data) {
    try {
        // 1. Read the source image (8-bit BGR).
        cv::Mat image = cv::imread(image_path);
        if (image.empty()) {
            std::cerr << "preprocessImage: failed to read image: " << image_path << std::endl;
            return false;
        }

        // 2. Resize to the model input resolution (width x height).
        const int target_w = 1333;
        const int target_h = 750;
        cv::Mat resized_image;
        cv::resize(image, resized_image, cv::Size(target_w, target_h), 0, 0, cv::INTER_LINEAR);

        // 3. Convert to float32, keeping BGR order.
        cv::Mat float_image;
        resized_image.convertTo(float_image, CV_32F);

        // 4. Split into per-channel planes (HWC -> CHW).
        std::vector<cv::Mat> channels(3);
        cv::split(float_image, channels);

        // 5. Pack as (1, 3, H, W).
        const size_t plane = static_cast<size_t>(target_h) * static_cast<size_t>(target_w);
        processed_data.resize(3 * plane);
        for (int c = 0; c < 3; ++c) {
            if (channels[c].isContinuous()) {
                // Fast path: cv::split yields freshly allocated continuous
                // planes, so one bulk copy replaces the per-pixel at<>() loop.
                const float* src = channels[c].ptr<float>(0);
                std::copy(src, src + plane, processed_data.begin() + c * plane);
            } else {
                // Defensive fallback for a non-continuous plane.
                size_t idx = c * plane;
                for (int h = 0; h < target_h; ++h) {
                    for (int w = 0; w < target_w; ++w) {
                        processed_data[idx++] = channels[c].at<float>(h, w);
                    }
                }
            }
        }

        return true;

    } catch (const std::exception& e) {
        // The original swallowed exceptions silently; report them instead.
        std::cerr << "preprocessImage: exception: " << e.what() << std::endl;
        return false;
    }
}

std::vector<float> TensorRTDetector::inferenceFromImage(const std::string& image_path) {
    if (!initialized_) {
        std::cerr << "TensorRT detector not initialized" << std::endl;
        return {};
    }
    
    try {
        // 预处理图像
        std::vector<float> processed_data;
        if (!preprocessImage(image_path, processed_data)) {
            std::cerr << "Failed to preprocess image: " << image_path << std::endl;
            return {};
        }
        
        // 执行推理
        std::vector<float> result;
        if (engine_ && context_) {
            result = performRealInference(processed_data);
        } else {
            std::cerr << "TensorRT engine or context not available" << std::endl;
            return {};
        }
        
        // 为图像推理生成专门的可视化结果
        std::string output_path = "/data2/xd/Co-DETR-TensorRT/co_detr_ros_ws/src/co_detr_ros/results/image_inference_result_visualization.jpg";
        bool viz_success = visualizeResults(result, output_path);
        if (viz_success) {
            std::cout << "✅ Image inference visualization saved to: " << output_path << std::endl;
        } else {
            std::cout << "❌ Failed to save image inference visualization" << std::endl;
        }
        
        return result;
        
    } catch (const std::exception& e) {
        std::cerr << "Exception during image inference: " << e.what() << std::endl;
        return {};
    }
}

// Like inferenceFromImage, but also returns the inference wall time.
// Returns {bboxes, elapsed_microseconds}; {empty, 0} on any failure.
// The original always returned 0 for the time (it was only printed inside
// performRealInference); now the caller actually receives the measurement.
std::pair<std::vector<float>, long long> TensorRTDetector::inferenceFromImageWithTime(const std::string& image_path) {
    try {
        // Preprocess the image into the network's input tensor.
        std::vector<float> processed_data;
        if (!preprocessImage(image_path, processed_data)) {
            return {{}, 0};
        }

        // Without an engine there is nothing to time.
        if (!engine_ || !context_) {
            return {{}, 0};
        }

        // Time the inference call itself (includes H2D/D2H copies and the
        // visualization side effect inside performRealInference).
        const auto start = std::chrono::high_resolution_clock::now();
        std::vector<float> result = performRealInference(processed_data);
        const auto stop = std::chrono::high_resolution_clock::now();
        const long long elapsed_us =
            std::chrono::duration_cast<std::chrono::microseconds>(stop - start).count();

        return {result, elapsed_us};
    } catch (const std::exception&) {
        return {{}, 0};
    }
}
