#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include <cuda_runtime.h>
#include <NvInfer.h>

#include <opencv2/dnn.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

// 定义日志回调类
// Logging callback required by the TensorRT API.
// Forwards warnings and errors to stdout; suppresses info/verbose output.
class Logger : public nvinfer1::ILogger {
public:
    void log(Severity severity, const char* msg) noexcept override {
        // Severity enum orders from most to least severe, so anything
        // "greater" than kWARNING is informational and is dropped here.
        if (severity > Severity::kWARNING) {
            return;
        }
        std::cout << msg << std::endl;
    }
} gLogger;


bool cv2vec(const cv::Mat src_image, std::vector<float> &input_vectors) {
      cv::Mat channels[3]; //借用来进行HWC->CHW
      cv::split(src_image, channels);
      for (int i = 0; i < src_image.channels(); i++)  // HWC->CHW
      {
          std::vector<float> data = std::vector<float>(channels[i].reshape(1, src_image.cols * src_image.rows));
          input_vectors.insert(input_vectors.end(), data.begin(), data.end());
      }
      
      return true;
  }

// Resize, convert BGR->RGB, normalize, and flatten an image into a CHW
// float vector suitable as network input.
//   src_image     - source BGR image (as loaded by cv::imread)
//   width/height  - target network input resolution
//   input_vectors - output; normalized CHW floats are appended here
//   means/stds    - per-channel normalization constants (RGB order, [0,1] scale)
// Returns false if the image is empty or means/stds have fewer than 3 entries.
bool image_preprocess(const cv::Mat &src_image, int width, int height, std::vector<float> &input_vectors, std::vector<float>&means, std::vector<float>&stds) {
    if (src_image.empty() || means.size() < 3 || stds.size() < 3) {
        return false;
    }

    // 1. Resize with bicubic interpolation, then convert to float while
    //    scaling [0,255] -> [0,1] in a single pass (the old code did the
    //    division per pixel and kept a redundant intermediate copy).
    cv::Mat resized, dst_image;
    cv::resize(src_image, resized, cv::Size(width, height), 0, 0, cv::INTER_CUBIC);
    resized.convertTo(dst_image, CV_32F, 1.0 / 255.0);

    // 2. Convert BGR (OpenCV default) to RGB so channel order matches means/stds.
    cv::cvtColor(dst_image, dst_image, cv::COLOR_BGR2RGB);

    // 3. Per-channel normalization (x - mean) / std using whole-image
    //    arithmetic instead of a nested per-pixel loop.
    cv::subtract(dst_image, cv::Scalar(means[0], means[1], means[2]), dst_image);
    cv::divide(dst_image, cv::Scalar(stds[0], stds[1], stds[2]), dst_image);

    // 4. Flatten HWC -> CHW into the output vector.
    return cv2vec(dst_image, input_vectors);
}


// 从文件加载TensorRT引擎
// Deserialize a TensorRT engine from a serialized plan file.
//   engineFilePath - path to the .engine/.plan file on disk
// Returns the engine on success (caller owns it and must release it),
// or nullptr on any failure (message printed to stderr).
nvinfer1::ICudaEngine* loadEngineFromFile(const std::string& engineFilePath) {
    std::ifstream file(engineFilePath, std::ios::binary);
    if (!file.good()) {
        std::cerr << "Failed to open engine file: " << engineFilePath << std::endl;
        return nullptr;
    }

    // Determine the file size, then read the whole plan into memory.
    file.seekg(0, file.end);
    std::streampos end = file.tellg();
    file.seekg(0, file.beg);
    if (end <= 0) {
        std::cerr << "Engine file is empty: " << engineFilePath << std::endl;
        return nullptr;
    }
    size_t size = static_cast<size_t>(end);

    std::vector<char> buffer(size);
    // Check the read actually completed; the old code deserialized whatever
    // was in the buffer even after a short/failed read.
    if (!file.read(buffer.data(), size)) {
        std::cerr << "Failed to read engine file: " << engineFilePath << std::endl;
        return nullptr;
    }
    file.close();

    nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(gLogger);
    if (!runtime) {
        std::cerr << "Failed to create TensorRT runtime!" << std::endl;
        return nullptr;
    }

    nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(buffer.data(), size, nullptr);
    if (!engine) {
        std::cerr << "Failed to deserialize engine!" << std::endl;
        runtime->destroy();
        return nullptr;
    }

    // NOTE(review): destroying the runtime while the engine is still alive is
    // only valid on older TensorRT releases; TensorRT 8+ requires the runtime
    // to outlive the engine — confirm against the linked TRT version.
    runtime->destroy();
    return engine;
}


// 执行推理
void infer(const std::string& engineFilePath) {
    std::vector<float> means = {0.48145466, 0.4578275, 0.40821073};
    std::vector<float> stds = {0.26862954, 0.26130258, 0.27577711};
    nvinfer1::ICudaEngine* engine = loadEngineFromFile(engineFilePath);
    if (!engine) {
        std::cerr << "Failed to load engine!" << std::endl;
        return;
    }

    // 创建执行上下文
    nvinfer1::IExecutionContext* context = engine->createExecutionContext();
    if (!context) {
        std::cerr << "Failed to create execution context!" << std::endl;
        engine->destroy();
        return;
    }

    // 获取输入和输出数量
    int numInputs = 0, numOutputs = 0;
    for (int i = 0; i < engine->getNbBindings(); ++i) {
        if (engine->bindingIsInput(i)) {
            ++numInputs;
        } else {
            ++numOutputs;
        }
    }

    std::cout << "Number of inputs: " << numInputs << std::endl;
    std::cout << "Number of outputs: " << numOutputs << std::endl;

    
    // 分配设备内存
    std::vector<size_t> input_sizes;
    std::vector<void*> buffers(engine->getNbBindings());
    for (int i = 0; i < engine->getNbBindings(); ++i) {
        nvinfer1::Dims dims = engine->getBindingDimensions(i);
        const bool isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int dim){ return dim == -1; });
        if (isDynamicInput){
                dims.d[0] = 1;
        }
        if (engine->bindingIsInput(i))
        {
            context->setBindingDimensions(i, dims);
        }
        size_t size = 1;
        for (int j = 0; j < dims.nbDims; ++j) {
            size *= dims.d[j];
        }
        size *= sizeof(float); 
        input_sizes.push_back(size);
        cudaMalloc(&buffers[i], size);
        std::cout << "Allocated buffer for binding " << i << " with size " << size << std::endl;
    }

    // 准备输入数据（示例）
    for (int i = 0; i < numInputs; ++i) {
        nvinfer1::Dims dims = engine->getBindingDimensions(i);
        cv::Mat image_roi = cv::imread("../1.jpg");
        std::vector<float> input_data;
        image_preprocess(image_roi, 224, 224, input_data, means, stds);
        cudaMemcpy(buffers[i], input_data.data(), input_sizes[i], cudaMemcpyHostToDevice);
        std::cout << "Copied input data for binding " << i << std::endl;
    }

    // 执行推理
    bool success = context->executeV2(buffers.data());
    if (!success) {
        std::cerr << "Failed to execute inference!" << std::endl;
    } else {
        std::cout << "Inference executed successfully!" << std::endl;
    }

    // 获取输出数据
    for (int i = 0; i < numOutputs; ++i) {
        int bindingIndex = numInputs + i;
        nvinfer1::Dims dims = engine->getBindingDimensions(bindingIndex);
        size_t size = input_sizes[i]/ sizeof(float);
        std::vector<float> outputData(size);
        cudaMemcpy(outputData.data(), buffers[bindingIndex], input_sizes[bindingIndex], cudaMemcpyDeviceToHost);
        std::cout << "Output data for binding " << bindingIndex << ": ";
        for (size_t j = 0; j < std::min<size_t>(10, size); ++j) {
            std::cout << outputData[j] << " ";
        }
        std::cout << std::endl;
    }

    // 清理资源
    for (void* buffer : buffers) {
        cudaFree(buffer);
    }
    context->destroy();
    engine->destroy();
}

int main() {
    // Path to the serialized TensorRT engine to run.
    const std::string engineFilePath =
        "/home/xjl/Ascend_6/test_model/imagebind_cos.engine";

    // Run a single inference pass against the sample image.
    infer(engineFilePath);
    return 0;
}