#include <iostream>
#include <vector>
#include <memory>
#include <chrono>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>

// Minimal TensorRT logger: suppresses INFO-level chatter, prints everything
// else (warnings/errors) verbatim to stdout.
class Logger : public nvinfer1::ILogger {
public:
    void log(Severity severity, const char* msg) noexcept override {
        // Drop the verbose INFO stream; surface all other severities.
        if (severity == Severity::kINFO) {
            return;
        }
        std::cout << msg << std::endl;
    }
} gLogger;

// Forward declarations
// NOTE(review): redundant — <NvInfer.h> (included above) already provides the
// full declarations of these interfaces. Harmless, so kept as-is.
namespace nvinfer1 {
    class IRuntime;
    class ICudaEngine;
    class IExecutionContext;
}

// Custom deleter so std::unique_ptr can own TensorRT objects, which must be
// released through their destroy() method rather than operator delete.
struct TRTDestroy {
    template <typename U>
    void operator()(U* ptr) const {
        // destroy() on a null pointer would be UB; guard like delete does not need to.
        if (ptr != nullptr) {
            ptr->destroy();
        }
    }
};

// Owning smart-pointer alias for TensorRT runtime/engine/context objects.
template <typename U>
using TRTUniquePtr = std::unique_ptr<U, TRTDestroy>;

// Wraps a serialized TensorRT engine (.plan) for the GridSample comparison
// test: loads the plan from disk, owns runtime/engine/context, and runs
// synchronous single-batch inference.
class GridSampleEngine {
private:
    TRTUniquePtr<nvinfer1::IRuntime> runtime_;
    TRTUniquePtr<nvinfer1::ICudaEngine> engine_;
    TRTUniquePtr<nvinfer1::IExecutionContext> context_;
    std::string engine_path_;
    bool initialized_ = false;

    // Output tensor is assumed to be 1x3x6x6 floats, matching the engines
    // built for this test. TODO(review): query the engine's binding
    // dimensions instead of hard-coding the shape.
    static constexpr size_t kOutputElementCount = 1 * 3 * 6 * 6;

public:
    GridSampleEngine(const std::string& engine_path) : engine_path_(engine_path) {}

    // Deserializes the engine file and creates an execution context.
    // Returns false (with a message on stderr) on any failure.
    bool initialize() {
        std::cout << "Initializing GridSample engine: " << engine_path_ << std::endl;

        try {
            // Create TensorRT runtime with logger
            runtime_.reset(nvinfer1::createInferRuntime(gLogger));
            if (!runtime_) {
                std::cerr << "Failed to create TensorRT runtime" << std::endl;
                return false;
            }

            // Load engine file
            std::ifstream file(engine_path_, std::ios::binary);
            if (!file.good()) {
                std::cerr << "Failed to open engine file: " << engine_path_ << std::endl;
                return false;
            }

            file.seekg(0, std::ios::end);
            const std::streampos end_pos = file.tellg();
            // BUGFIX: tellg() returns -1 on failure; the original assigned it
            // straight into a size_t, wrapping to a huge allocation size.
            if (end_pos < 0) {
                std::cerr << "Failed to determine engine file size: " << engine_path_ << std::endl;
                return false;
            }
            const size_t size = static_cast<size_t>(end_pos);
            file.seekg(0, std::ios::beg);

            std::vector<char> engine_data(size);
            // BUGFIX: the original never checked read() success and could
            // hand a partially-filled buffer to deserializeCudaEngine.
            if (size == 0 || !file.read(engine_data.data(), static_cast<std::streamsize>(size))) {
                std::cerr << "Failed to read engine file: " << engine_path_ << std::endl;
                return false;
            }

            // Create engine (file closes via RAII when `file` goes out of scope)
            engine_.reset(runtime_->deserializeCudaEngine(engine_data.data(), size));
            if (!engine_) {
                std::cerr << "Failed to deserialize engine" << std::endl;
                return false;
            }

            // Create execution context
            context_.reset(engine_->createExecutionContext());
            if (!context_) {
                std::cerr << "Failed to create execution context" << std::endl;
                return false;
            }

            initialized_ = true;
            std::cout << "✅ Engine initialized successfully" << std::endl;
            return true;

        } catch (const std::exception& e) {
            std::cerr << "Exception during initialization: " << e.what() << std::endl;
            return false;
        }
    }

    // Runs one synchronous inference pass. Both inputs are copied host->device,
    // executeV2 runs, and the 1x3x6x6 output is copied back. Returns an empty
    // vector on any failure.
    // NOTE(review): assumes the engine's binding order is [input, grid, output]
    // — confirm against the binding indices of the built engines.
    std::vector<float> inference(const std::vector<float>& input_data,
                                const std::vector<float>& grid_data) {
        if (!initialized_) {
            std::cerr << "Engine not initialized" << std::endl;
            return {};
        }

        // Guard against zero-byte cudaMalloc/cudaMemcpy on empty inputs.
        if (input_data.empty() || grid_data.empty()) {
            std::cerr << "Input or grid data is empty" << std::endl;
            return {};
        }

        try {
            const size_t input_size = input_data.size() * sizeof(float);
            const size_t grid_size = grid_data.size() * sizeof(float);
            const size_t output_size = kOutputElementCount * sizeof(float);

            void* buffers[3] = {nullptr, nullptr, nullptr};
            // Single cleanup path replacing the original's five duplicated
            // cudaFree cascades; safe to call with unallocated (null) slots.
            auto free_buffers = [&buffers]() {
                for (void*& buf : buffers) {
                    if (buf) {
                        cudaFree(buf);
                        buf = nullptr;
                    }
                }
            };

            // Allocate GPU memory for input, grid and output bindings.
            cudaError_t cuda_status = cudaMalloc(&buffers[0], input_size);
            if (cuda_status != cudaSuccess) {
                std::cerr << "Failed to allocate input buffer: " << cudaGetErrorString(cuda_status) << std::endl;
                free_buffers();
                return {};
            }

            cuda_status = cudaMalloc(&buffers[1], grid_size);
            if (cuda_status != cudaSuccess) {
                std::cerr << "Failed to allocate grid buffer: " << cudaGetErrorString(cuda_status) << std::endl;
                free_buffers();
                return {};
            }

            cuda_status = cudaMalloc(&buffers[2], output_size);
            if (cuda_status != cudaSuccess) {
                std::cerr << "Failed to allocate output buffer: " << cudaGetErrorString(cuda_status) << std::endl;
                free_buffers();
                return {};
            }

            // Copy input data to GPU
            cuda_status = cudaMemcpy(buffers[0], input_data.data(), input_size, cudaMemcpyHostToDevice);
            if (cuda_status != cudaSuccess) {
                std::cerr << "Failed to copy input data: " << cudaGetErrorString(cuda_status) << std::endl;
                free_buffers();
                return {};
            }

            cuda_status = cudaMemcpy(buffers[1], grid_data.data(), grid_size, cudaMemcpyHostToDevice);
            if (cuda_status != cudaSuccess) {
                std::cerr << "Failed to copy grid data: " << cudaGetErrorString(cuda_status) << std::endl;
                free_buffers();
                return {};
            }

            // Execute inference (executeV2 is synchronous) and time it.
            auto start_time = std::chrono::high_resolution_clock::now();
            bool success = context_->executeV2(buffers);
            auto end_time = std::chrono::high_resolution_clock::now();

            if (!success) {
                std::cerr << "Failed to execute inference" << std::endl;
                free_buffers();
                return {};
            }

            auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
            std::cout << "Inference time: " << duration.count() << " μs" << std::endl;

            // Copy output data from GPU
            std::vector<float> output_data(kOutputElementCount);
            cuda_status = cudaMemcpy(output_data.data(), buffers[2], output_size, cudaMemcpyDeviceToHost);
            if (cuda_status != cudaSuccess) {
                std::cerr << "Failed to copy output data: " << cudaGetErrorString(cuda_status) << std::endl;
                free_buffers();
                return {};
            }

            free_buffers();
            return output_data;

        } catch (const std::exception& e) {
            std::cerr << "Exception during inference: " << e.what() << std::endl;
            return {};
        }
    }

    ~GridSampleEngine() {
        if (initialized_) {
            std::cout << "Cleaning up GridSample engine" << std::endl;
        }
    }
};

// Prints an element-wise accuracy report comparing FP16 vs FP32 inference
// results: min/max/mean difference, standard deviation, significant-diff
// count, average relative error, and a preview of the first 10 elements.
// Reports (to stdout) and returns early on size mismatch or empty input.
void analyzeDifferences(const std::vector<float>& fp16_results, const std::vector<float>& fp32_results) {
    if (fp16_results.size() != fp32_results.size()) {
        std::cout << "❌ Result sizes don't match: FP16=" << fp16_results.size() 
                  << ", FP32=" << fp32_results.size() << std::endl;
        return;
    }

    // BUGFIX: empty inputs previously produced NaN statistics (0/0 divisions).
    if (fp16_results.empty()) {
        std::cout << "❌ No results to compare" << std::endl;
        return;
    }

    const size_t count = fp16_results.size();
    std::vector<float> differences;
    differences.reserve(count);

    // BUGFIX: min/max were seeded with 0.0f, so the reported max could never
    // be negative and the reported min never positive, even when every
    // difference was. Seed with the first actual difference instead.
    const float first_diff = fp16_results[0] - fp32_results[0];
    float max_diff = first_diff;
    float min_diff = first_diff;
    float sum_diff = 0.0f;
    float sum_squared_diff = 0.0f;
    int significant_diff_count = 0;

    for (size_t i = 0; i < count; ++i) {
        const float diff = fp16_results[i] - fp32_results[i];
        differences.push_back(diff);

        if (std::abs(diff) > 0.001f) {
            significant_diff_count++;
        }

        max_diff = std::max(max_diff, diff);
        min_diff = std::min(min_diff, diff);
        sum_diff += diff;
        sum_squared_diff += diff * diff;
    }

    const float mean_diff = sum_diff / count;
    const float variance = (sum_squared_diff / count) - (mean_diff * mean_diff);
    // Clamp tiny negative variance caused by floating-point cancellation so
    // sqrt never receives a negative argument.
    const float std_dev = std::sqrt(std::max(variance, 0.0f));

    std::cout << "\n=== GridSample FP16 vs FP32 精度对比 ===" << std::endl;
    std::cout << "总元素数量: " << fp16_results.size() << std::endl;
    std::cout << "最大差异: " << max_diff << std::endl;
    std::cout << "最小差异: " << min_diff << std::endl;
    std::cout << "平均差异: " << mean_diff << std::endl;
    std::cout << "标准差: " << std_dev << std::endl;
    std::cout << "显著差异元素 (>0.001): " << significant_diff_count 
              << " (" << (100.0f * significant_diff_count / fp16_results.size()) << "%)" << std::endl;

    // Average relative error over elements whose FP32 reference is non-negligible.
    float total_relative_error = 0.0f;
    int valid_count = 0;
    for (size_t i = 0; i < count; ++i) {
        if (std::abs(fp32_results[i]) > 1e-6f) {
            total_relative_error += std::abs(differences[i] / fp32_results[i]);
            valid_count++;
        }
    }

    if (valid_count > 0) {
        float avg_relative_error = total_relative_error / valid_count;
        std::cout << "平均相对误差: " << (avg_relative_error * 100.0f) << "%" << std::endl;
    }

    // Side-by-side preview of the first (up to) 10 elements.
    std::cout << "\n前10个元素对比:" << std::endl;
    std::cout << std::setw(8) << "Index" << std::setw(15) << "FP16" << std::setw(15) << "FP32" 
              << std::setw(15) << "Difference" << std::endl;
    std::cout << std::string(55, '-') << std::endl;

    const int preview = std::min(10, static_cast<int>(count));
    for (int i = 0; i < preview; ++i) {
        std::cout << std::setw(8) << i 
                  << std::setw(15) << std::fixed << std::setprecision(6) << fp16_results[i]
                  << std::setw(15) << std::fixed << std::setprecision(6) << fp32_results[i]
                  << std::setw(15) << std::fixed << std::setprecision(6) << differences[i] << std::endl;
    }
}

// Entry point: builds deterministic pseudo-random test tensors, runs the same
// inputs through an FP16 and an FP32 GridSample engine, and reports the
// element-wise precision differences.
int main() {
    std::cout << "=== GridSample FP16 vs FP32 引擎对比测试 ===" << std::endl;

    // Test tensors: 1x3x8x8 input feature map and 1x6x6x2 sampling grid.
    std::vector<float> input_data(1 * 3 * 8 * 8);
    std::vector<float> grid_data(1 * 6 * 6 * 2);

    // Fill the input with values in [0, 1] (rand() is deliberately unseeded
    // here so every run uses the identical sequence).
    for (auto& value : input_data) {
        value = static_cast<float>(rand()) / RAND_MAX;
    }

    // Grid coordinates are mapped into the normalized range [-1, 1].
    for (auto& coord : grid_data) {
        coord = (static_cast<float>(rand()) / RAND_MAX - 0.5f) * 2.0f;
    }

    std::cout << "输入数据大小: " << input_data.size() << std::endl;
    std::cout << "Grid数据大小: " << grid_data.size() << std::endl;

    // --- FP16 engine ---
    GridSampleEngine fp16_engine("gridsample_fp16.plan");
    if (!fp16_engine.initialize()) {
        std::cerr << "Failed to initialize FP16 engine" << std::endl;
        return -1;
    }

    const std::vector<float> fp16_results = fp16_engine.inference(input_data, grid_data);
    if (fp16_results.empty()) {
        std::cerr << "FP16 inference failed" << std::endl;
        return -1;
    }

    // --- FP32 reference engine, identical inputs ---
    GridSampleEngine fp32_engine("gridsample_fp32.plan");
    if (!fp32_engine.initialize()) {
        std::cerr << "Failed to initialize FP32 engine" << std::endl;
        return -1;
    }

    const std::vector<float> fp32_results = fp32_engine.inference(input_data, grid_data);
    if (fp32_results.empty()) {
        std::cerr << "FP32 inference failed" << std::endl;
        return -1;
    }

    // Element-wise comparison of the two precision modes.
    analyzeDifferences(fp16_results, fp32_results);

    std::cout << "\n✅ GridSample引擎对比测试完成!" << std::endl;
    return 0;
}