
#include <NvInfer.h>
#include <NvOnnxParser.h>

#include <unistd.h>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>



// Minimal TensorRT logger: prints warnings and errors to stdout and
// silently drops INFO/VERBOSE messages. A single global instance
// (gLogger) is shared by the builder and the ONNX parser below.
class Logger : public nvinfer1::ILogger {
public:
    void log(Severity severity, const char* msg) noexcept override {
        // Severity enum orders ERROR < WARNING < INFO, so anything past
        // kWARNING is informational noise we do not want to print.
        const bool tooVerbose = severity > Severity::kWARNING;
        if (tooVerbose) {
            return;
        }
        std::cout << msg << std::endl;
    }
} gLogger;

// Serializes a built TensorRT engine and writes the blob to engineFilePath.
//
// @param engineFilePath  destination file (overwritten, binary mode)
// @param engine          non-null, fully built engine to serialize
//
// Errors are reported on stderr; the function returns without throwing.
void saveEngineToFile(const std::string& engineFilePath, nvinfer1::ICudaEngine* engine) {
    nvinfer1::IHostMemory* serializedEngine = engine->serialize();
    if (!serializedEngine) {
        std::cerr << "Failed to serialize engine!" << std::endl;
        return;
    }

    std::ofstream outFile(engineFilePath, std::ios::binary);
    if (!outFile) {
        std::cerr << "Failed to open file for saving engine: " << engineFilePath << std::endl;
        serializedEngine->destroy();  // was leaked on this early-return path
        return;
    }

    outFile.write(reinterpret_cast<const char*>(serializedEngine->data()), serializedEngine->size());
    outFile.close();

    // close() flushes; a failed stream here means the blob did not make it to disk.
    if (!outFile) {
        std::cerr << "Failed to write engine to file: " << engineFilePath << std::endl;
    } else {
        std::cout << "Engine saved to " << engineFilePath << std::endl;
    }
    serializedEngine->destroy();
}

bool buildEngineFromOnnx(const std::string& onnxFilePath, const std::string& engineFilePath,int Height, int Width) {

    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger);
    if (!builder) {
        std::cerr << "Failed to create TensorRT builder!" << std::endl;
        return false;
    }

    auto network = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
    if (!network) {
        std::cerr << "Failed to create TensorRT network!" << std::endl;
        return false;
    }

    nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, gLogger);
    if (!parser) {
        std::cerr << "Failed to create ONNX parser!" << std::endl;
        return false;
    }

    if (!parser->parseFromFile(onnxFilePath.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kWARNING))) {
        std::cerr << "Failed to parse ONNX file: " << onnxFilePath << std::endl;
        return false;
    }

    nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
    if (!config) {
        std::cerr << "Failed to create TensorRT builder config!" << std::endl;
        return false;
    }

    config->setMaxWorkspaceSize(1 << 30);

    nvinfer1::IOptimizationProfile* profile = builder->createOptimizationProfile();
    if (!profile) {
        std::cerr << "Failed to create optimization profile!" << std::endl;
        return false;
    }

    for (unsigned int i = 0, n = network->getNbInputs(); i < n; i++)
    {
        nvinfer1::ITensor* inputTensor = network->getInput(i);
        if (!inputTensor) {
            std::cerr << "Failed to get input tensor from the network!" << std::endl;
            return false;
        }
        nvinfer1::Dims dims = inputTensor->getDimensions();
        const bool isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int dim){ return dim == -1; }) || inputTensor->isShapeTensor();
        if (isDynamicInput){

            profile->setDimensions(inputTensor->getName(), nvinfer1::OptProfileSelector::kMIN,
                                    nvinfer1::Dims4{1, 3, Height, Width});
            profile->setDimensions(inputTensor->getName(), nvinfer1::OptProfileSelector::kOPT,
                                    nvinfer1::Dims4{2, 3, Height, Width});
            profile->setDimensions(inputTensor->getName(), nvinfer1::OptProfileSelector::kMAX,
                                    nvinfer1::Dims4{4, 3, Height, Width});
        }
    }
    
    config->addOptimizationProfile(profile);

    nvinfer1::ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    if (!engine) {
        std::cerr << "Failed to build TensorRT engine!" << std::endl;
        return false;
    }

    saveEngineToFile(engineFilePath, engine);

    engine->destroy();
    config->destroy();
    parser->destroy();
    network->destroy();
    builder->destroy();

    return true;
}

// Converts two ImageBind ONNX models to TensorRT engines at 224x224.
// Exits 0 only when both conversions succeed.
int main() {
    const std::string onnxFilePath = "/home/xjl/Ascend_6/test_model/imagebind_cos.onnx";
    const std::string onnxFilePath2 = "/home/xjl/Ascend_6/test_model/imagebind_sin.onnx";

    const std::string engineFilePath = "/home/xjl/Ascend_6/test_model/imagebind_cos.engine";
    const std::string engineFilePath2 = "/home/xjl/Ascend_6/test_model/imagebind_sin.engine";

    // access() returns 0 when the file exists, so a NON-zero result means the
    // model is missing. (The previous condition was inverted and warned when
    // the file DID exist; it also never checked the second model at all.)
    if (access(onnxFilePath.c_str(), F_OK) != 0) {
        std::cerr << "ONNX model does not exist, path: " << onnxFilePath << std::endl;
    }
    if (access(onnxFilePath2.c_str(), F_OK) != 0) {
        std::cerr << "ONNX model does not exist, path: " << onnxFilePath2 << std::endl;
    }

    const int Height = 224;
    const int Width = 224;

    // Build both engines; report each result and track overall success
    // instead of unconditionally returning 0.
    bool allOk = true;

    if (buildEngineFromOnnx(onnxFilePath, engineFilePath, Height, Width)) {
        std::cout << "Successfully converted ONNX to TensorRT engine!" << std::endl;
    } else {
        std::cerr << "Failed to convert ONNX to TensorRT engine!" << std::endl;
        allOk = false;
    }

    if (buildEngineFromOnnx(onnxFilePath2, engineFilePath2, Height, Width)) {
        std::cout << "Successfully converted ONNX to TensorRT engine!" << std::endl;
    } else {
        std::cerr << "Failed to convert ONNX to TensorRT engine!" << std::endl;
        allOk = false;
    }

    return allOk ? 0 : 1;
}