//
// Created by z00850016 on 2025/8/21.
//

#include <fstream>
#include <string>
#include <vector>

#include "LocalModelExe.h"

namespace localvit {
    LocalModelExe::LocalModelExe() = default;

    LocalModelExe::~LocalModelExe() = default;

    ModelStatus LocalModelExe::Init(const std::string &modelPath) {
        ModelStatus ret = nnExecutor.Init();
        if (ret != MODEL_STATUS_SUCCESS) {
            return ret;
        }

        if (modelPath.empty()) {
            LOGE("model path is empty.");
            return MODEL_STATUS_ERROR;
        }

        ret = nnExecutor.InitModels(modelPath, "bert62m_npu_input32.omc");
        if (ret != MODEL_STATUS_SUCCESS) {
            return ret;
        }
//        ret = nnExecutor.InitModels(modelPath, "classify.omc");
//        if (ret != MODEL_STATUS_SUCCESS) {
//            return ret;
//        }
        ret = nnExecutor.InitModels(modelPath, "vit86m_with_sim_768.omc");
        if (ret != MODEL_STATUS_SUCCESS) {
            return ret;
        }

        ret = InitTokenizer(modelPath + "vocab.txt");
        if (ret != MODEL_STATUS_SUCCESS) {
            return ret;
        }
        return MODEL_STATUS_SUCCESS;
    }

    ModelStatus LocalModelExe::DeInit() {
        return nnExecutor.DeInit();
    }

    ModelStatus LocalModelExe::InitTokenizer(const std::string &vocabPath) {
        std::ifstream vocabFile(vocabPath, std::ios::binary | std::ios::ate);
        if (!vocabFile.is_open()) {
            LOGE("failed to open: %s.", vocabPath.c_str());
            return MODEL_STATUS_ERROR;
        }

        size_t fileSize = vocabFile.tellg();
        vocabFile.seekg(0, std::ios::beg);

        char* buffer = new char[fileSize + 1];
        vocabFile.read(buffer, fileSize);
        buffer[fileSize] = '\0';

        tokenizer.LoadVocab(buffer, fileSize);

//        LOGD("filesize: %zu", fileSize);
//        buffer[20] = '\0';
//        LOGD("%s", buffer);

        delete[] buffer;
        return MODEL_STATUS_SUCCESS;
    }

    ModelStatus LocalModelExe::ProcessVit(std::vector<localvit::VisionBuffer> &inputBuffers,
                                          std::vector<localvit::VisionBuffer> &outputBuffers) {
        return nnExecutor.Process("vit86m_with_sim_768.omc",
                              inputBuffers, outputBuffers);
    }

    ModelStatus LocalModelExe::ProcessBert(const std::string &text,
                                           std::vector<localvit::VisionBuffer> &outputBuffers) {
        std::vector<int> inputIds;
        std::vector<std::string> token;
        std::vector<int> offset;
        tokenizer.EncodeText(text, inputIds, token, offset);

        if (inputIds.size() > 32) {
            LOGE("inputIds.size()=%zu is too long", inputIds.size());
            return MODEL_STATUS_ERROR;
        }

        float attentionMask[32] = {0.0f};

        for (int i = 0; i < inputIds.size(); ++i) {
            attentionMask[i] = 1.0f;
        }

        inputIds.resize(32);

        localvit::VisionBuffer VisionBufInputIds;
        VisionBufInputIds.dataType = localvit::VisionBufferDataType::INT32;
        VisionBufInputIds.buffer = static_cast<void *>(inputIds.data());
        VisionBufInputIds.bufferSize = {1, 32, 1, 1};

        localvit::VisionBuffer inputAttentionMask;
        inputAttentionMask.dataType = localvit::VisionBufferDataType::FLOAT32;
        inputAttentionMask.buffer = static_cast<void *>(attentionMask);
        inputAttentionMask.bufferSize = {1, 32, 1, 1};

        std::vector<localvit::VisionBuffer> inputBuffers;
        inputBuffers.push_back(VisionBufInputIds);
        inputBuffers.push_back(inputAttentionMask);

        return nnExecutor.Process("bert62m_npu_input32.omc",inputBuffers,outputBuffers);

    }
}