//
// Created by z00850016 on 2025/8/12.
//

#include <dlfcn.h>
#include <utility>
#include <dirent.h>
#include <sstream>
#include "nnexecutor.h"


namespace localvit {
    // All members are RAII-managed (shared_ptr / std::vector), so the defaulted
    // special members are sufficient. NPU-side resources (loaded model, mem
    // buffer) are released explicitly via DeInit(), which callers are expected
    // to invoke before destruction.
    NNExecutor::NNExecutor() = default;

    NNExecutor::~NNExecutor() = default;

    /**
     * Creates and initializes the HiAI model manager client.
     * Must be called once before InitModels()/Process().
     *
     * @return MODEL_STATUS_SUCCESS on success, MODEL_STATUS_ERROR otherwise.
     */
    ModelStatus NNExecutor::Init() {
        modelManagerClient = std::make_shared<hiai::AiModelMngerClient>();
        if (modelManagerClient == nullptr) {
            LOGE("hiai model manager client is null.");
            return MODEL_STATUS_ERROR;
        }

        // nullptr listener — presumably selects synchronous inference mode;
        // confirm against the HiAI DDK documentation.
        hiai::AIStatus ret = modelManagerClient->Init(nullptr);
        if (ret != hiai::AI_SUCCESS) {
            // Include the status code so field failures can be diagnosed.
            LOGE("hiai init fail, ret = %d.", static_cast<int>(ret));
            // Bug fix: do not keep a non-null but uninitialized client around —
            // the null-guards in InitModels()/Process()/DeInit() would wrongly
            // treat it as usable.
            modelManagerClient = nullptr;
            return MODEL_STATUS_ERROR;
        }

        return MODEL_STATUS_SUCCESS;
    }

    /**
     * Loads the model file (modelPath + modelName) onto the NPU.
     * Reads the model into a HiAI memory buffer, wraps it in a model
     * description (EXTREME frequency, on-device tuning) and loads it through
     * the model manager client. On success inputMemBuffer stays alive until
     * DeInit().
     *
     * @param modelPath directory of the model file (must be non-empty).
     * @param modelName model file name; also used as the loaded model's key.
     * @return MODEL_STATUS_SUCCESS on success, MODEL_STATUS_ERROR otherwise.
     */
    ModelStatus NNExecutor::InitModels(const std::string &modelPath, const std::string &modelName) {
        if (modelPath.empty()) {
            LOGE("model path is empty.");
            return MODEL_STATUS_ERROR;
        }
        // Guard: Init() must have succeeded before a builder can be created.
        if (modelManagerClient == nullptr) {
            LOGE("model manager client is null, call Init() first.");
            return MODEL_STATUS_ERROR;
        }

        modelBuilder = std::make_shared<hiai::AiModelBuilder>(
                modelManagerClient);
        if (modelBuilder == nullptr) {
            LOGE("hiai modelBuilder init failed.");
            return MODEL_STATUS_ERROR;
        }
        std::string resourcePath = modelPath + modelName;
        // Informational message — was previously logged at error severity.
        LOGI("resource path is: %s.", resourcePath.c_str());
        inputMemBuffer = modelBuilder->InputMemBufferCreate(resourcePath);
        // Bug fix: InputMemBufferCreate can fail (e.g. missing file); the
        // buffer was previously dereferenced below without a null check.
        if (inputMemBuffer == nullptr) {
            LOGE("create input mem buffer failed for %s.", resourcePath.c_str());
            return MODEL_STATUS_ERROR;
        }

        // Bug fix: on error paths the destroyed buffer pointer must be
        // cleared, otherwise DeInit() would destroy it a second time.
        auto destroyInputMemBuffer = [this]() {
            modelBuilder->MemBufferDestroy(inputMemBuffer);
            inputMemBuffer = nullptr;
        };

        std::shared_ptr<hiai::AiModelDescription> modelDescription =
                std::make_shared<hiai::AiModelDescription>(modelName.c_str(),
                                                           hiai::AiModelDescription_Frequency_EXTREME,
                                                           hiai::HIAI_FRAMEWORK_CAFFE,
                                                           hiai::HIAI_MODELTYPE_ONLINE,
                                                           hiai::AiModelDescription_DeviceType_NPU);
        hiai::AIStatus ret = modelDescription->SetModelBuffer(inputMemBuffer->GetMemBufferData(),
                                                              inputMemBuffer->GetMemBufferSize());
        if (ret != hiai::AI_SUCCESS) {
            LOGE("set input model buffer failed.");
            destroyInputMemBuffer();
            return MODEL_STATUS_ERROR;
        }
        ret = modelDescription->SetTuningStrategy(hiai::TuningStrategy::ON_DEVICE_TUNING);
        if (ret != hiai::AI_SUCCESS) {
            LOGE("set tuning strategy failed.");
            destroyInputMemBuffer();
            return MODEL_STATUS_ERROR;
        }

        std::vector<std::shared_ptr<hiai::AiModelDescription>> modelDescs;
        modelDescs.push_back(modelDescription);

        ret = modelManagerClient->Load(modelDescs);
        if (ret != hiai::AI_SUCCESS) {
            LOGE("load model failed.");
            destroyInputMemBuffer();
            return MODEL_STATUS_ERROR;
        }
        LOGI("load model success.");
        return MODEL_STATUS_SUCCESS;
    }

    /**
     * Releases the model memory buffer, unloads the model and drops all
     * tensor handles. Safe to call repeatedly (no-op once the client is gone).
     *
     * @return MODEL_STATUS_SUCCESS when nothing was loaded or unload worked,
     *         MODEL_STATUS_ERROR when UnLoadModel() reported a failure
     *         (local state is cleared either way).
     */
    ModelStatus NNExecutor::DeInit() {
        if (modelManagerClient == nullptr) {
            return MODEL_STATUS_SUCCESS;  // nothing to tear down
        }
        if (inputMemBuffer != nullptr && modelBuilder != nullptr) {
            modelBuilder->MemBufferDestroy(inputMemBuffer);
            // Bug fix: clear the pointer so it cannot dangle or be destroyed
            // twice on a later call.
            inputMemBuffer = nullptr;
        }
        hiai::AIStatus status = modelManagerClient->UnLoadModel();
        // Clear local state unconditionally — this was duplicated across the
        // error and success paths in the old code.
        modelManagerClient = nullptr;
        inputTensors.clear();
        outputTensors.clear();
        if (status != hiai::AI_SUCCESS) {
            LOGE("unload model fail.");
            return MODEL_STATUS_ERROR;
        }
        return MODEL_STATUS_SUCCESS;
    }


//
//    ModelStatus NNExecutor::EncodeText(const std::string &text, std::vector<int> inputIds) {
//        tokenizer.EncodeText(text, inputIds);
//    }
//
//    ModelStatus NNExecutor::Process(const std::string &modelName,
//                                       VisionBuffer &inputBuffer,
//                                       VisionBuffer &outputBuffer) {
//
//        if (inputBuffer.buffer == nullptr) {
//            LOGE("input buffer is null");
//            return MODEL_STATUS_ERROR;
//        }
//
//        std::vector<hiai::TensorDimension> inputTensorDimensions;
//        std::vector<hiai::TensorDimension> outputTensorDimensions;
//        hiai::AIStatus ret = modelManagerClient->GetModelIOTensorDim(modelName,
//                                                                     inputTensorDimensions,
//                                                                     outputTensorDimensions);
//        if (ret != hiai::AI_SUCCESS) {
//            LOGE("GetModelIOTensorDim fail.");
//            return MODEL_STATUS_ERROR;
//        }
//        // 可以优化
//        InitInputTensor(inputTensorDimensions, inputBuffer);
//
//        InitOutputTensor(outputTensorDimensions);
//
//        hiai::AiContext context;
//        std::string key = "model_name";
//        context.AddPara(key, modelName);
//
//        int timeout = 1000;
//        int stamp = 0;
//        long time_use;
//        struct timeval tpStart{}, tpEnd{};
//        gettimeofday(&tpStart, nullptr);
//        ret = modelManagerClient->Process(context, inputTensors, outputTensors,
//                                          timeout, stamp);
//        if (ret != hiai::AI_SUCCESS) {
//            LOGE("model process fail.");
//            return MODEL_STATUS_ERROR;
//        }
//        gettimeofday(&tpEnd, nullptr);
//        time_use = 1000000 * (tpEnd.tv_sec - tpStart.tv_sec) + tpEnd.tv_usec - tpStart.tv_usec;
//        LOGI("hiai vision process time %ld ms.", time_use / 1000);
//
//        void *originOutputBuffer = outputTensors[0]->GetBuffer();
//        outputBuffer.buffer = originOutputBuffer;
//        outputBuffer.bufferSize = {outputTensors[0]->GetTensorDimension().GetNumber(),
//                                   outputTensors[0]->GetTensorDimension().GetChannel(),
//                                   outputTensors[0]->GetTensorDimension().GetWidth(),
//                                   outputTensors[0]->GetTensorDimension().GetHeight()};
//
//        return MODEL_STATUS_SUCCESS;
//    }

    /**
     * Runs one synchronous inference pass of a previously loaded model.
     * Queries the model's I/O tensor dimensions, (re)builds the input/output
     * tensors, copies the caller's input buffers in, executes the model and
     * exposes the raw output tensor buffers through outputBuffers.
     *
     * @param modelName key of the model loaded via InitModels().
     * @param inputBuffers one buffer per model input, in model input order.
     * @param outputBuffers overwritten with views onto the output tensors;
     *        the memory stays owned by outputTensors and is only valid until
     *        the next Process()/DeInit() call.
     * @return MODEL_STATUS_SUCCESS on success, MODEL_STATUS_ERROR otherwise.
     */
    ModelStatus NNExecutor::Process(const std::string &modelName,
                                    std::vector<VisionBuffer> &inputBuffers,
                                    std::vector<VisionBuffer> &outputBuffers) {
        // Bug fix: Process() dereferenced modelManagerClient unconditionally;
        // guard against calls before Init() or after DeInit(), consistent with
        // the checks in the other methods.
        if (modelManagerClient == nullptr) {
            LOGE("model manager client is null, call Init() first.");
            return MODEL_STATUS_ERROR;
        }

        std::vector<hiai::TensorDimension> inputTensorDimensions;
        std::vector<hiai::TensorDimension> outputTensorDimensions;
        hiai::AIStatus ret = modelManagerClient->GetModelIOTensorDim(modelName,
                                                                     inputTensorDimensions,
                                                                     outputTensorDimensions);
        if (ret != hiai::AI_SUCCESS) {
            LOGE("GetModelIOTensorDim fail.");
            return MODEL_STATUS_ERROR;
        }
        // NOTE(review): tensors are rebuilt on every call; since the model's
        // I/O dimensions are fixed after load, they could be cached per model.
        ModelStatus retStatus = InitInputTensor(inputTensorDimensions, inputBuffers);
        if (retStatus != MODEL_STATUS_SUCCESS) {
            LOGE("InitInputTensor fail.");
            return MODEL_STATUS_ERROR;
        }

        retStatus = InitOutputTensor(outputTensorDimensions);
        if (retStatus != MODEL_STATUS_SUCCESS) {
            LOGE("InitOutputTensor fail.");
            return MODEL_STATUS_ERROR;
        }

        hiai::AiContext context;
        std::string key = "model_name";
        context.AddPara(key, modelName);

        int timeout = 1000;  // presumably milliseconds — confirm HiAI DDK docs
        int stamp = 0;
        struct timeval tpStart{}, tpEnd{};
        gettimeofday(&tpStart, nullptr);
        ret = modelManagerClient->Process(context, inputTensors, outputTensors,
                                          timeout, stamp);
        if (ret != hiai::AI_SUCCESS) {
            LOGE("model process fail.");
            return MODEL_STATUS_ERROR;
        }
        gettimeofday(&tpEnd, nullptr);
        // Elapsed wall-clock time in microseconds, logged in milliseconds.
        long time_use = 1000000L * (tpEnd.tv_sec - tpStart.tv_sec) + tpEnd.tv_usec - tpStart.tv_usec;
        LOGI("hiai vision process time %ld ms.", time_use / 1000);

        FillInOutputBuffer(outputBuffers);

        return MODEL_STATUS_SUCCESS;
    }

    void NNExecutor::FillInOutputBuffer(std::vector<VisionBuffer> &outputBuffers) {
        outputBuffers.clear();
        for (const auto& outTensor : outputTensors) {
            VisionBuffer outBuf{};
            outBuf.buffer = outTensor->GetBuffer();
            outBuf.bufferSize = {outTensor->GetTensorDimension().GetNumber(),
                                 outTensor->GetTensorDimension().GetChannel(),
                                 outTensor->GetTensorDimension().GetWidth(),
                                 outTensor->GetTensorDimension().GetHeight()};

            outputBuffers.push_back(outBuf);
        }
    }

    // Returns a copy of the input tensor handles built by InitInputTensor()
    // (shared ownership — the underlying tensors are the ones fed to Process()).
    std::vector<std::shared_ptr<hiai::AiTensor>> NNExecutor::GetInputTensors() {
        return inputTensors;
    }

    // Returns a copy of the output tensor handles built by InitOutputTensor()
    // (shared ownership — valid until the next Process()/DeInit() rebuilds them).
    std::vector<std::shared_ptr<hiai::AiTensor>> NNExecutor::GetOutputTensors() {
        return outputTensors;
    }
//
//    ModelStatus NNExecutor::InitInputTensor(const std::vector<hiai::TensorDimension> &inputDimensions,
//                                               const VisionBuffer &inputBuffer) {
//        inputTensors.clear();
//        for (auto &inputTensorDimension : inputDimensions) {
//            uint32_t ni = inputTensorDimension.GetNumber();
//            uint32_t ci = inputTensorDimension.GetChannel();
//            uint32_t hi = inputTensorDimension.GetHeight();
//            uint32_t wi = inputTensorDimension.GetWidth();
//            LOGI("inputTensorDimension ni = %d", ni);
//            LOGI("inputTensorDimension ci = %d", ci);
//            LOGI("inputTensorDimension hi = %d", hi);
//            LOGI("inputTensorDimension wi = %d", wi);
//
//            // normal mode - for test and beta environment
//            std::shared_ptr<hiai::AiTensor> inputTensor = std::make_shared<hiai::AiTensor>();
//            hiai::TensorDimension inputDimension(ni, ci, hi, wi);
//            hiai::AIStatus ret = inputTensor->Init(&inputDimension, hiai::HIAI_DATATYPE_FLOAT32);
//            if (ret != hiai::AI_SUCCESS) {
//                LOGE("init input tensor fail.");
//                return MODEL_STATUS_ERROR;
//            }
//            auto inputTensorBuffer = static_cast<float *>(inputTensor->GetBuffer());
//            memcpy(inputTensorBuffer, inputBuffer.buffer, inputTensor->GetSize());
//            inputTensors.push_back(inputTensor);
//        }
//        return MODEL_STATUS_SUCCESS;
//    }

    ModelStatus NNExecutor::InitInputTensor(const std::vector<hiai::TensorDimension> &inputDimensions,
                                               const std::vector<VisionBuffer> &inputBuffers) {
        if (inputDimensions.size() != inputBuffers.size()) {
            LOGE("Mismatch between number of input dimensions (%zu) and input buffers (%zu).",
                 inputDimensions.size(), inputBuffers.size());
            return MODEL_STATUS_ERROR;
        }

        inputTensors.clear();
        for (size_t i = 0; i < inputDimensions.size(); ++i) {
            const auto &inputTensorDimension = inputDimensions[i];
            const auto &inputBuffer = inputBuffers[i];

            if (inputBuffer.buffer == nullptr) {
                LOGE("Input buffer at index %zu is null.", i);
                return MODEL_STATUS_ERROR;
            }

            uint32_t ni = inputTensorDimension.GetNumber();
            uint32_t ci = inputTensorDimension.GetChannel();
            uint32_t hi = inputTensorDimension.GetHeight();
            uint32_t wi = inputTensorDimension.GetWidth();
            LOGD("InputTensorDimension[%zu]: ni = %d, ci = %d, hi = %d, wi = %d", i, ni, ci, hi, wi);
            LOGD("inputBuffer[%zu]: ni = %d, ci = %d, hi = %d, wi = %d", i,
                 inputBuffer.bufferSize.num, inputBuffer.bufferSize.channel, inputBuffer.bufferSize.height, inputBuffer.bufferSize.width);

            std::shared_ptr<hiai::AiTensor> inputTensor = std::make_shared<hiai::AiTensor>();
            //hiai::TensorDimension currentDimension(ni, ci, hi, wi);

            hiai::HIAI_DataType tensorDataType;
            switch (inputBuffer.dataType) {
                case VisionBufferDataType::FLOAT32:
                    tensorDataType = hiai::HIAI_DATATYPE_FLOAT32;
                    break;
                case VisionBufferDataType::UINT8:
                    tensorDataType = hiai::HIAI_DATATYPE_UINT8;
                    break;
                case VisionBufferDataType::INT32:
                    tensorDataType = hiai::HIAI_DATATYPE_INT32;
                    break;
                case VisionBufferDataType::INT64:
                    tensorDataType = hiai::HIAI_DATATYPE_INT64;
                    break;
                default:
                    LOGE("Unsupported data type in VisionBuffer at index %zu.", i);
                    return MODEL_STATUS_ERROR;
            }

            hiai::AIStatus ret = inputTensor->Init(&inputTensorDimension, tensorDataType);
            if (ret != hiai::AI_SUCCESS) {
                LOGE("Init input tensor %zu fail.", i);
                return MODEL_STATUS_ERROR;
            }

            void *tensorBuffer = inputTensor->GetBuffer();
            if (tensorBuffer == nullptr) {
                LOGE("Failed to get buffer for input tensor %zu.", i);
                return MODEL_STATUS_ERROR;
            }

            if (inputTensor->GetSize() == 0) {
                LOGE("Input tensor %zu has zero size.", i);
                return MODEL_STATUS_ERROR;
            }

            memcpy(tensorBuffer, inputBuffer.buffer, inputTensor->GetSize());
            inputTensors.push_back(inputTensor);
        }
        return MODEL_STATUS_SUCCESS;
    }

//
//    ModelStatus NNExecutor::InitOutputTensor(
//            const std::vector<hiai::TensorDimension> &outputDimensions,
//            std::vector<VisionBuffer> &targetVisionBuffers) {
//        outputTensors.clear();
//        targetVisionBuffers.clear();
//
//        if (outputDimensions.empty()) {
//            LOGI("Model has no output dimensions specified.");
//            return MODEL_STATUS_ERROR;
//        }
//
//        for (size_t i = 0; i < outputDimensions.size(); ++i) {
//            const auto &outputTensorDimension = outputDimensions[i];
//            uint32_t no = outputTensorDimension.GetNumber();
//            uint32_t co = outputTensorDimension.GetChannel();
//            uint32_t ho = outputTensorDimension.GetHeight();
//            uint32_t wo = outputTensorDimension.GetWidth();
//            LOGI("OutputTensorDimension %zu: no = %d, co = %d, ho = %d, wo = %d", i, no, co, ho, wo);
//
//            std::shared_ptr<hiai::AiTensor> outputTensor = std::make_shared<hiai::AiTensor>();
//            hiai::TensorDimension currentDimension(no, co, ho, wo);
//            hiai::AIStatus ret = outputTensor->Init(&currentDimension, hiai::HIAI_DATATYPE_FLOAT32); // Or determine type if known
//            if (ret != hiai::AI_SUCCESS) {
//                LOGE("Init output tensor %zu failed.", i);
//                return MODEL_STATUS_ERROR;
//            }
//            outputTensors.push_back(outputTensor);
//
//            VisionBuffer vb;
//            targetVisionBuffers.push_back(vb);
//        }
//        return MODEL_STATUS_SUCCESS;
//    }

    // Allocates one FLOAT32 output tensor for every output dimension reported
    // by the model and stores the handles in outputTensors, ready for the next
    // Process() run.
    ModelStatus NNExecutor::InitOutputTensor(
            const std::vector<hiai::TensorDimension> &outputDimensions) {
        outputTensors.clear();
        for (size_t idx = 0; idx < outputDimensions.size(); ++idx) {
            const hiai::TensorDimension &dims = outputDimensions[idx];
            const uint32_t batch = dims.GetNumber();
            const uint32_t channels = dims.GetChannel();
            const uint32_t height = dims.GetHeight();
            const uint32_t width = dims.GetWidth();
            LOGI("outputTensorDimension no = %d", batch);
            LOGI("outputTensorDimension co = %d", channels);
            LOGI("outputTensorDimension ho = %d", height);
            LOGI("outputTensorDimension wo = %d", width);

            auto tensor = std::make_shared<hiai::AiTensor>();
            hiai::TensorDimension tensorDimension(batch, channels, height, width);
            if (tensor->Init(&tensorDimension, hiai::HIAI_DATATYPE_FLOAT32) != hiai::AI_SUCCESS) {
                LOGE("init output tensor fail.");
                return MODEL_STATUS_ERROR;
            }
            outputTensors.push_back(tensor);
        }
        return MODEL_STATUS_SUCCESS;
    }

}

