#include "bertinfer.h"
#include <iostream>
#include <vector>
#include <iostream>
#include <torch/torch.h>
#include <torch/script.h>
#include "fast_tokenizer/tokenizers/ernie_fast_tokenizer.h"


#include <map>
#include <string>
#include <cassert>
#include <algorithm> // For std::replace
#include <regex> // For std::regex_replace
//#include <chrono>



std::map<std::string, std::string> mapping_dict = {
    {"position", "职位"},
    {"company", "公司"},
    {"book", "书籍"},
    {"address", "地址"},
    {"scene", "场景"},
    {"mobile", "手机号"},
    {"email", "邮箱"},
    {"game", "游戏"},
    {"government", "政府"},
    {"QQ", "QQ号"},
    {"vx", "微信号"},
    {"name", "姓名"},
    {"movie", "电影"},
    {"organization", "组织"}
};

std::map<std::string, std::vector<std::string>> get_output_format(const std::vector<std::string>& origin_token, const std::vector<std::string>& predict_token) {
    std::map<std::string, std::vector<std::string>> result_dict;
    assert(origin_token.size() == predict_token.size());

    for (size_t i = 0; i < predict_token.size(); ++i) {
        std::string label_token = predict_token[i];
        if (label_token != "O") {
            size_t pos = label_token.find('-');
            std::string label_0 = label_token.substr(0, pos);
            std::string label_1 = label_token.substr(pos + 1);

            auto it = mapping_dict.find(label_1);
            if (it != mapping_dict.end()) {
                label_1 = it->first;
            }

            std::string tmp_token = origin_token[i];
            if (tmp_token[0] == '#') {
                tmp_token = std::regex_replace(tmp_token, std::regex("#"), "");
                if (!result_dict[label_1].empty()) {
                    result_dict[label_1].back() += tmp_token;
                }
            } else {
                if (label_0 == "B" || result_dict[label_1].empty()) {
                    result_dict[label_1].push_back(tmp_token);
                } else {
                    result_dict[label_1].back() += tmp_token;
                }
            }
        }
    }
    return result_dict;
}

std::vector<std::string> split_string_by_space(const std::string& str) {
    std::vector<std::string> tokens = {"[CLS]"};
    std::istringstream iss(str);
    std::string token;

    // Extract each token and emplace it into the vector
    while (iss >> token) {
        tokens.emplace_back(token);
    }

    tokens.push_back("[SEP]");
    return tokens;
}

torch::Tensor tensorFromVector(std::vector<uint32_t> ids)
{

    int64_t seq_len = ids.size();
    torch::Tensor tensor = torch::empty({seq_len}, torch::kInt64);


    for (int64_t i = 0; i < seq_len; ++i) {
        tensor[i] = (int64_t)ids.at(i);
    }

    return tensor;
}

BertInfer* BertInfer::getInstance() {
    if (instance == nullptr)
    {
        instance = new BertInfer("/usr/share/live-image-text/res/traced_quantized_bert.pt", "/usr/share/live-image-text/res/vocab.txt");
    }
    return instance;
}
using namespace paddlenlp;
BertInfer *BertInfer::instance = nullptr;
fast_tokenizer::tokenizers_impl::ErnieFastTokenizer *tokenizer;
torch::jit::script::Module module;
BertInfer::BertInfer(const std::string &modelPath, const std::string &vocabPath)
{
    tokenizer = new fast_tokenizer::tokenizers_impl::ErnieFastTokenizer(vocabPath);

    try {
        module = torch::jit::load(modelPath);
    }
    catch (const c10::Error& e) {
        std::cerr << "error loading the model\n";
        return ;
    }
    std::cout << "模型加载成功" << std::endl;

    torch::DeviceType device_type;
    if (torch::cuda::is_available()) {
        std::cout << "CUDA available! Predicting on GPU." << std::endl;
        device_type = torch::kCUDA;
    } else {
        std::cout << "Predicting on CPU." << std::endl;
        device_type = torch::kCPU;
    }
    torch::Device device(device_type);
    module.to(device);
}

std::map<std::string, std::vector<std::string>> BertInfer::runInference(const std::string &input)
{

    auto start = std::chrono::high_resolution_clock::now();

    fast_tokenizer::core::Encoding encoding;
    //    std::string single_string =
    //            "我叫王五，您可以发邮件至个人邮箱wangwu@qq.com 联系我。我来自广州市天河区，现在是一名软件工程师。工作请联络李靖童，工作邮箱panxiaofengstudio@163.com, 我的手机号码是13267436701，主页是www.baidu.com,http://dds.cn,请你一定要来湖南省衡阳市珠晖区这边来看看。";
//    std::string single_string = "我叫王五，您可以发邮件至个人邮箱:wangwu@qq.com 联系我。我来自广州市天河区，现在是一名软件工程师。工作请联络李靖童，工作邮箱：panxiaofengstudio@163.com";
    tokenizer->EncodePairStrings(input, &encoding);

    //运行模型推理
    torch::Tensor input_ids = tensorFromVector(encoding.GetIds()).unsqueeze(0);
    torch::Tensor token_type_ids = tensorFromVector(encoding.GetTypeIds()).unsqueeze(0);
    torch::Tensor attention_mask = tensorFromVector(encoding.GetAttentionMask()).unsqueeze(0);

    // 模型推断并获取结果元组
    auto outputs = module.forward({input_ids, attention_mask, attention_mask}).toTuple();
    at::Tensor output = outputs->elements()[0].toTensor();

    // 获取预测结果
    auto predictions = output.argmax(2);

//    std::cout << predictions << std::endl;

    //    std::string results = "我 叫 王 五 ， 您 可 以 发 邮 件 至 个 人 邮 箱 : wang ##wu @ qq . com 联 系 我 。 我 来 自 广 州 市 天 河 区 ， 现 在 是 一 名 软 件 工 程 师 。 工 作 请 联 络 李 靖 童 ， 工 作 邮 箱 ： pan ##xi ##ao ##fe ##ng ##st ##ud ##io @ 163 . com";
    std::string results;
    tokenizer->Decode(encoding.GetIds(),&results, true);
    //     std::cout << results <<std::endl;
    std::vector<std::string> origin_token = split_string_by_space(results);
//    std::cout << origin_token.size() << std::endl;

    // id2label映射
    std::map<int, std::string> id2label = {
        {0, "O"}, {1, "B-position"}, {10, "I-company"}, {11, "B-book"}, {12, "I-book"},
        {13, "B-address"}, {14, "I-address"}, {15, "B-scene"}, {16, "I-scene"},
        {17, "B-mobile"}, {18, "I-mobile"}, {19, "B-email"}, {2, "I-position"},
        {20, "I-email"}, {21, "B-game"}, {22, "I-game"}, {23, "B-government"},
        {24, "I-government"}, {25, "B-QQ"}, {26, "I-QQ"}, {27, "B-vx"}, {28, "I-vx"},
        {3, "B-name"}, {4, "I-name"}, {5, "B-movie"}, {6, "I-movie"}, {7, "B-organization"},
        {8, "I-organization"}, {9, "B-company"}
    };


    // 构建predicted_token_class
    std::vector<std::string> predicted_token;
    // 获取Tensor的数据指针
    auto preds_ptr = predictions.data_ptr<int64_t>();  // 假设tensor的数据类型是int64_t
    // 获取Tensor的元素数量
    int64_t num_preds = predictions.numel();

    // 遍历Tensor
    for (int64_t i = 0; i < num_preds; ++i) {
        // 使用id2label映射（假设已定义）来打印每个预测的标签
        //             std::cout << "Element " << i << ": " << preds_ptr[i] << std::endl;
        predicted_token.push_back(id2label[preds_ptr[i]]);
    }

    //         for (int t : predictions[0]) {  // 得到第一行的预测
    //             predicted_token.push_back(id2label[t]);
    //         }

    //    std::vector<std::string> origin_token = {"[CLS]", "我", "叫", "王", "五", "，", "您", "可", "以", "发", "邮", "件", "至", "个", "人", "邮", "箱", ":", "wang", "##wu", "@", "qq", ".", "com", "联", "系", "我", "。", "我", "来", "自", "广", "州", "市", "天", "河", "区", "，", "现", "在", "是", "一", "名", "软", "件", "工", "程", "师", "。", "工", "作", "请", "联", "络", "李", "靖", "童", "，", "工", "作", "邮", "箱", "：", "pan", "##xi", "##ao", "##fe", "##ng", "##st", "##ud", "##io", "@", "163", ".", "com", "[SEP]"};
    //    std::vector<std::string> predicted_token = {"O", "O", "O", "B-name", "I-name", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "O", "B-email", "I-email", "I-email", "I-email", "I-email", "I-email", "O", "O", "O", "O", "O", "O", "O", "B-address", "I-address", "I-address", "I-address", "I-address", "I-address", "O", "O", "O", "O", "O", "O", "B-position", "I-position", "I-position", "I-position", "I-position", "O", "O", "O", "O", "O", "O", "B-name", "I-name", "I-name", "O", "O", "O", "O", "O", "O", "B-email", "I-email", "I-email", "I-email", "I-email", "I-email", "I-email", "I-email", "I-email", "I-email", "I-email", "I-email", "O"};
//    std::cout << predicted_token.size() << std::endl;
    auto res = get_output_format(origin_token, predicted_token);


//    std::string text = "Here are some URLs: https://www.example.com, http://example.org, and ftp://example.net and baidu.com and www.dd.com www.comd.cn";


    // 打印结果
    for (auto& pair : res) {
        std::cout << pair.first << ": ";
        for (auto& token : pair.second) {
            std::cout << token << " ";
        }
        std::cout << std::endl;
    }
    std::cout << "ner finished" << std::endl;
    auto end = std::chrono::high_resolution_clock::now();
    // 计算持续时间
    auto duration1 = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
    // 输出运行时间
    std::cout << "Time taken: " << duration1.count() << " ms" << std::endl;

    return res;
}
