#include "tracknetv3_trt.h"

#include <cuda_runtime_api.h>

#include <cassert>
#include <cstring>   // memcpy
#include <fstream>
#include <iostream>
#include <memory>    // std::unique_ptr

// Abort with file/line context when a CUDA runtime call returns a non-zero
// status. Wrapped in do { } while (0) so it behaves as a single statement
// inside if/else bodies.
// NOTE(review): currently unused in this translation unit — the calls below
// check cudaError_t return values inline instead; kept for callers elsewhere.
#define CHECK(status) \
    do { \
        auto ret = (status); \
        if (ret != 0) { \
            std::cerr << "Cuda failure: " << ret << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
            abort(); \
        } \
    } while (0)

// Locate the ball center as the peak of a single-channel heatmap.
// Writes the peak coordinates to (x, y) and returns true when the peak
// value reaches `threshold`; returns false (x = y = 0) otherwise.
bool TrackNetV3_TRT::getBallCenter(const cv::Mat& heatmap, int& x, int& y, float threshold) {
    // Reject missing or multi-channel maps up front.
    if (heatmap.empty() || heatmap.channels() != 1) {
        std::cerr << "Error: Invalid heatmap for ball center detection" << std::endl;
        return false;
    }

    // Find the strongest response in the heatmap.
    double peak_value = 0.0;
    cv::Point peak_position;
    cv::minMaxLoc(heatmap, nullptr, &peak_value, nullptr, &peak_position);

    // A peak below the threshold means no ball was detected.
    if (peak_value < threshold) {
        x = 0;
        y = 0;
        return false;
    }

    x = peak_position.x;
    y = peak_position.y;
    return true;
}

// Print a tensor shape to stdout as "<name> dimensions: [d0, d1, ...]".
void TrackNetV3_TRT::printDims(const nvinfer1::Dims& dims, const std::string& name) {
    std::cout << name << " dimensions: [";
    const int count = dims.nbDims;
    for (int idx = 0; idx < count; ++idx) {
        if (idx > 0) std::cout << ", ";   // separator before every entry but the first
        std::cout << dims.d[idx];
    }
    std::cout << "]" << std::endl;
}

// Construct an uninitialized wrapper: zero all bookkeeping members and
// create the CUDA stream used for async host<->device copies.
// Engine loading and buffer allocation happen later in init().
TrackNetV3_TRT::TrackNetV3_TRT() 
    : runtime_(nullptr), engine_(nullptr), context_(nullptr),
      input_buffer_(nullptr), output_buffer_(nullptr),
      input_index_(0), output_index_(0),
      max_batch_size_(1), input_width_(0), input_height_(0), input_channels_(0),
      output_width_(0), output_height_(0), output_channels_(0),
      input_size_(0), output_size_(0) {
    
    std::cout << "Creating TrackNetV3_TRT instance..." << std::endl;
    // Start from a known-null handle so the destructor's `if (stream_)` test
    // is well-defined even if stream creation fails (previously stream_ was
    // left indeterminate on failure, making the destructor's
    // cudaStreamDestroy call undefined behavior).
    stream_ = nullptr;
    cudaError_t stream_status = cudaStreamCreate(&stream_);
    if (stream_status != cudaSuccess) {
        std::cerr << "Failed to create CUDA stream: " << cudaGetErrorString(stream_status) << std::endl;
        // cudaStreamCreate does not guarantee the output value on failure.
        stream_ = nullptr;
    } else {
        std::cout << "CUDA stream created successfully" << std::endl;
    }
}

// Release GPU buffers and TensorRT objects via destroy(), then tear down
// the CUDA stream last.
TrackNetV3_TRT::~TrackNetV3_TRT() {
    std::cout << "Destroying TrackNetV3_TRT instance..." << std::endl;
    destroy();
    if (stream_ != nullptr) {
        cudaStreamDestroy(stream_);
        std::cout << "CUDA stream destroyed" << std::endl;
    }
}

/**
 * Load a serialized TensorRT engine from disk and prepare for inference.
 *
 * Reads the engine file, deserializes it, creates an execution context,
 * records the input/output binding shapes (expected NCHW), and allocates
 * the device buffers used by process(). Returns false with a message on
 * stderr on any failure; partially-acquired resources are released by the
 * destructor via destroy().
 */
bool TrackNetV3_TRT::init(const std::string& engine_path) {
    std::cout << "Initializing TrackNetV3 with engine: " << engine_path << std::endl;
    
    // Check that the file exists and is readable.
    std::ifstream engine_file(engine_path, std::ios::binary);
    if (!engine_file) {
        std::cerr << "Error: Unable to open engine file: " << engine_path << std::endl;
        return false;
    }
    
    engine_file.seekg(0, std::ifstream::end);
    std::streampos end_pos = engine_file.tellg();
    if (end_pos < 0) {
        // tellg() returns -1 on failure; without this guard the value would
        // wrap to a huge size_t and the allocation below would throw.
        std::cerr << "Error: Failed to determine engine file size: " << engine_path << std::endl;
        return false;
    }
    size_t size = static_cast<size_t>(end_pos);
    engine_file.seekg(0, std::ifstream::beg);
    
    if (size == 0) {
        std::cerr << "Error: Engine file is empty: " << engine_path << std::endl;
        return false;
    }
    
    std::cout << "Engine file size: " << size << " bytes" << std::endl;
    
    std::unique_ptr<char[]> engine_data(new char[size]);
    engine_file.read(engine_data.get(), size);
    
    if (!engine_file) {
        std::cerr << "Error: Failed to read engine file" << std::endl;
        return false;
    }
    engine_file.close();
    
    // Create the TensorRT runtime.
    runtime_ = nvinfer1::createInferRuntime(logger_);
    if (!runtime_) {
        std::cerr << "Error: Failed to create TensorRT runtime" << std::endl;
        return false;
    }
    std::cout << "TensorRT runtime created successfully" << std::endl;
    
    // Deserialize the engine.
    engine_ = runtime_->deserializeCudaEngine(engine_data.get(), size);
    if (!engine_) {
        std::cerr << "Error: Failed to deserialize CUDA engine" << std::endl;
        return false;
    }
    std::cout << "CUDA engine deserialized successfully" << std::endl;
    
    // Create the execution context.
    context_ = engine_->createExecutionContext();
    if (!context_) {
        std::cerr << "Error: Failed to create execution context" << std::endl;
        return false;
    }
    std::cout << "Execution context created successfully" << std::endl;
    
    // Query binding info to learn the tensor shapes.
    int num_bindings = engine_->getNbBindings();
    std::cout << "Number of bindings: " << num_bindings << std::endl;
    
    for (int i = 0; i < num_bindings; ++i) {
        const char* name = engine_->getBindingName(i);
        bool is_input = engine_->bindingIsInput(i);
        nvinfer1::Dims dims = engine_->getBindingDimensions(i);
        
        std::cout << "Binding " << i << ": " << name 
                  << " (" << (is_input ? "input" : "output") << ")" << std::endl;
        printDims(dims, name);
        
        if (is_input) {
            input_index_ = i;
            // Parse the input dimensions (expects a fixed NCHW shape).
            if (dims.nbDims == 4) {
                // Dynamic-shape engines report -1 here; this class only
                // supports fixed-shape engines, so non-positive dims are
                // fatal — otherwise the size math below goes negative.
                if (dims.d[0] < 1 || dims.d[1] < 1 || dims.d[2] < 1 || dims.d[3] < 1) {
                    std::cerr << "Error: Unexpected input dimensions" << std::endl;
                    return false;
                }
                max_batch_size_ = dims.d[0];
                input_channels_ = dims.d[1];
                input_height_ = dims.d[2];
                input_width_ = dims.d[3];
            } else {
                std::cerr << "Error: Unexpected input dimensions" << std::endl;
                return false;
            }
        } else {
            output_index_ = i;
            // Parse the output dimensions (expects a fixed NCHW shape).
            if (dims.nbDims == 4) {
                if (dims.d[1] < 1 || dims.d[2] < 1 || dims.d[3] < 1) {
                    std::cerr << "Error: Unexpected output dimensions" << std::endl;
                    return false;
                }
                output_channels_ = dims.d[1];
                output_height_ = dims.d[2];
                output_width_ = dims.d[3];
            } else {
                std::cerr << "Error: Unexpected output dimensions" << std::endl;
                return false;
            }
        }
    }
    
    // Compute host/device buffer sizes in bytes.
    input_size_ = max_batch_size_ * input_channels_ * input_height_ * input_width_ * sizeof(float);
    output_size_ = max_batch_size_ * output_channels_ * output_height_ * output_width_ * sizeof(float);
    
    std::cout << "Input size: " << input_size_ << " bytes" << std::endl;
    std::cout << "Output size: " << output_size_ << " bytes" << std::endl;
    
    // Allocate device memory for the input and output tensors.
    cudaError_t alloc_input = cudaMalloc(&input_buffer_, input_size_);
    if (alloc_input != cudaSuccess) {
        std::cerr << "Error: Failed to allocate input GPU memory: " << cudaGetErrorString(alloc_input) << std::endl;
        return false;
    }
    
    cudaError_t alloc_output = cudaMalloc(&output_buffer_, output_size_);
    if (alloc_output != cudaSuccess) {
        std::cerr << "Error: Failed to allocate output GPU memory: " << cudaGetErrorString(alloc_output) << std::endl;
        cudaFree(input_buffer_);
        input_buffer_ = nullptr;
        return false;
    }
    
    std::cout << "GPU memory allocated successfully" << std::endl;
    std::cout << "TrackNetV3 initialization completed successfully!" << std::endl;
    std::cout << "Max batch size: " << max_batch_size_ << std::endl;
    std::cout << "Input: " << input_channels_ << "x" << input_height_ << "x" << input_width_ << std::endl;
    std::cout << "Output: " << output_channels_ << "x" << output_height_ << "x" << output_width_ << std::endl;
    
    return true;
}


// Intentionally empty single-frame preprocessing hook.
// TrackNetV3 takes a 9-channel (3-frame) input, so per-frame preprocessing
// cannot be expressed through this one-image interface; to keep the
// interface unchanged, the multi-frame preprocessing is done inline in
// process() instead.
void TrackNetV3_TRT::preprocess(const cv::Mat& img, float* data, int batch_index, int batch_size) {
    // Note: this function would need to handle three-frame input,
    // but to keep the interface consistent, process() handles multi-frame correctly.
}

/**
 * Run TrackNetV3 inference on a three-frame input sequence.
 *
 * The first three images in input_imgs are resized to the engine's input
 * resolution, normalized to [0, 1], converted BGR->RGB, and packed into a
 * single 9-channel planar (CHW) tensor (3 frames x 3 channels). The planar
 * network output is reassembled into interleaved multi-channel cv::Mat
 * heatmaps appended to output_heatmaps.
 *
 * Returns false on any preprocessing, copy, or inference failure.
 */
bool TrackNetV3_TRT::process(const std::vector<cv::Mat>& input_imgs, std::vector<cv::Mat>& output_heatmaps) {
    std::cout << "Processing " << input_imgs.size() << " images..." << std::endl;
    
    if (input_imgs.size() < 3) {
        std::cerr << "Error: TrackNetV3 requires at least 3 consecutive frames. Got: " << input_imgs.size() << std::endl;
        return false;
    }
    
    int batch_size = 1;  // One three-frame sequence per call.
    if (batch_size > max_batch_size_) {
        std::cerr << "Error: Batch size " << batch_size << " exceeds maximum " << max_batch_size_ << std::endl;
        return false;
    }
    
    std::cout << "Batch size: " << batch_size << std::endl;
    std::cout << "Input frames: " << input_imgs.size() << std::endl;
    
    // Host-side staging buffer for the 9-channel input tensor.
    size_t current_input_size = batch_size * input_channels_ * input_height_ * input_width_ * sizeof(float);
    std::vector<float> input_data(batch_size * input_channels_ * input_height_ * input_width_);
    
    // Preprocess the three frames into planar (CHW) layout.
    int channel_size = input_height_ * input_width_;
    int batch_offset = 0;
    
    for (int frame_idx = 0; frame_idx < 3; ++frame_idx) {
        // Cast avoids a signed/unsigned comparison (size() is size_t).
        if (frame_idx >= static_cast<int>(input_imgs.size())) {
            std::cerr << "Error: Not enough frames available" << std::endl;
            return false;
        }
        
        cv::Mat frame = input_imgs[frame_idx];
        std::cout << "Processing frame " << frame_idx + 1 << " - size: " << frame.cols << "x" << frame.rows << std::endl;
        
        // Resize to the network's input resolution.
        cv::Mat resized;
        try {
            cv::resize(frame, resized, cv::Size(input_width_, input_height_));
            std::cout << "Resized frame to: " << resized.cols << "x" << resized.rows << std::endl;
        } catch (const cv::Exception& e) {
            std::cerr << "OpenCV resize error: " << e.what() << std::endl;
            return false;
        }
        
        // Convert to float and normalize to [0, 1].
        cv::Mat normalized;
        resized.convertTo(normalized, CV_32FC3, 1.0 / 255.0);
        
        // Convert to RGB channel order (assumes the input frames are BGR,
        // OpenCV's default — TODO confirm against the caller).
        cv::Mat rgb;
        cv::cvtColor(normalized, rgb, cv::COLOR_BGR2RGB);
        
        // Split into individual channel planes.
        std::vector<cv::Mat> channels(3);
        cv::split(rgb, channels);
        
        // Copy this frame's 3 planes into their slots in the 9-channel tensor.
        for (int c = 0; c < 3; ++c) {
            int channel_index = frame_idx * 3 + c;
            float* channel_data = input_data.data() + batch_offset + channel_index * channel_size;
            
            // Validate the channel data before copying.
            if (channels[c].data == nullptr) {
                std::cerr << "Error: Channel data is null" << std::endl;
                return false;
            }
            
            if (channels[c].total() != static_cast<size_t>(channel_size)) {
                std::cerr << "Error: Channel size mismatch. Expected: " << channel_size 
                          << ", Got: " << channels[c].total() << std::endl;
                return false;
            }
            
            memcpy(channel_data, channels[c].data, channel_size * sizeof(float));
        }
        
        std::cout << "Frame " << frame_idx + 1 << " processed successfully" << std::endl;
    }
    
    std::cout << "All 3 frames processed. Total channels: 9" << std::endl;
    
    // Upload the input tensor to the GPU.
    cudaError_t copy_input = cudaMemcpyAsync(input_buffer_, input_data.data(), 
                                           current_input_size, cudaMemcpyHostToDevice, stream_);
    if (copy_input != cudaSuccess) {
        std::cerr << "Error: Failed to copy input to GPU: " << cudaGetErrorString(copy_input) << std::endl;
        return false;
    }
    
    // BUG FIX: executeV2() does not run on stream_, so the asynchronous
    // upload must complete before inference starts; otherwise the engine
    // could read a partially-copied input buffer. (Alternatively, use
    // enqueueV2(bindings, stream_, nullptr) to keep everything on stream_.)
    cudaError_t sync_input = cudaStreamSynchronize(stream_);
    if (sync_input != cudaSuccess) {
        std::cerr << "Error: Failed to synchronize stream after input copy: " << cudaGetErrorString(sync_input) << std::endl;
        return false;
    }
    
    // Binding array ordered as [input, output].
    void* bindings[2] = {input_buffer_, output_buffer_};
    
    std::cout << "Executing inference..." << std::endl;
    
    // Run synchronous inference.
    bool success = context_->executeV2(bindings);
    if (!success) {
        std::cerr << "Error: Failed to execute inference" << std::endl;
        return false;
    }
    
    std::cout << "Inference executed successfully" << std::endl;
    
    // Read the results back from the GPU.
    size_t current_output_size = batch_size * output_channels_ * output_height_ * output_width_ * sizeof(float);
    std::vector<float> output_data(batch_size * output_channels_ * output_height_ * output_width_);
    
    cudaError_t copy_output = cudaMemcpyAsync(output_data.data(), output_buffer_, 
                                            current_output_size, cudaMemcpyDeviceToHost, stream_);
    if (copy_output != cudaSuccess) {
        std::cerr << "Error: Failed to copy output from GPU: " << cudaGetErrorString(copy_output) << std::endl;
        return false;
    }
    
    // Wait for the device-to-host copy to finish (and surface any deferred
    // async errors) before touching output_data. The return value was
    // previously ignored.
    cudaError_t sync_output = cudaStreamSynchronize(stream_);
    if (sync_output != cudaSuccess) {
        std::cerr << "Error: Failed to synchronize stream after output copy: " << cudaGetErrorString(sync_output) << std::endl;
        return false;
    }
    
    std::cout << "Output data copied from GPU successfully" << std::endl;
    
    // Postprocess: rebuild interleaved heatmaps from the planar output.
    output_heatmaps.clear();
    for (int i = 0; i < batch_size; ++i) {
        cv::Mat heatmap(output_height_, output_width_, CV_32FC(output_channels_));
        
        // Renamed from batch_offset to avoid shadowing the outer variable.
        int out_offset = i * output_channels_ * output_height_ * output_width_;
        
        std::vector<cv::Mat> channels(output_channels_);
        for (int c = 0; c < output_channels_; ++c) {
            float* channel_data = output_data.data() + out_offset + c * output_height_ * output_width_;
            // Wraps output_data without copying; cv::merge below copies into heatmap.
            channels[c] = cv::Mat(output_height_, output_width_, CV_32FC1, channel_data);
        }
        
        cv::merge(channels, heatmap);
        output_heatmaps.push_back(heatmap);
        std::cout << "Generated heatmap " << i << " - size: " << heatmap.cols << "x" << heatmap.rows 
                  << " channels: " << heatmap.channels() << std::endl;
    }
    
    std::cout << "Processing completed successfully" << std::endl;
    return true;
}



void TrackNetV3_TRT::postprocess(float* output, cv::Mat& heatmap, int batch_index, int batch_size) {
    int channel_size = output_height_ * output_width_;
    int batch_offset = batch_index * output_channels_ * channel_size;
    
    std::vector<cv::Mat> channels(output_channels_);
    
    for (int c = 0; c < output_channels_; ++c) {
        float* channel_data = output + batch_offset + c * channel_size;
        channels[c] = cv::Mat(output_height_, output_width_, CV_32FC1, channel_data);
    }
    
    cv::merge(channels, heatmap);
}

// Release device buffers, then tear down TensorRT objects in reverse order
// of creation (context -> engine -> runtime). Safe to call more than once:
// every pointer is nulled after release. Always returns true.
bool TrackNetV3_TRT::destroy() {
    std::cout << "Cleaning up resources..." << std::endl;

    if (input_buffer_ != nullptr) {
        cudaFree(input_buffer_);
        input_buffer_ = nullptr;
        std::cout << "Input buffer freed" << std::endl;
    }
    if (output_buffer_ != nullptr) {
        cudaFree(output_buffer_);
        output_buffer_ = nullptr;
        std::cout << "Output buffer freed" << std::endl;
    }

    if (context_ != nullptr) {
        context_->destroy();
        context_ = nullptr;
        std::cout << "Context destroyed" << std::endl;
    }
    if (engine_ != nullptr) {
        engine_->destroy();
        engine_ = nullptr;
        std::cout << "Engine destroyed" << std::endl;
    }
    if (runtime_ != nullptr) {
        runtime_->destroy();
        runtime_ = nullptr;
        std::cout << "Runtime destroyed" << std::endl;
    }

    return true;
}
