#include "Tokenizer.h"
#include <iostream>
#include <fstream>
#include <regex>
#undef LOG_DOMAIN
#define LOG_DOMAIN 0x0000

#undef LOG_TAG
#define LOG_TAG "TextEntry"

namespace localvit {
    // Creates a tokenizer with an empty vocabulary trie rooted at vocabs_.
    Tokenizer::Tokenizer() : vocabs_(new Node()) {}

    // Releases the vocabulary trie. Deleting the root frees every node,
    // since children are stored by value inside Node::sons (see Insert).
    Tokenizer::~Tokenizer() {
        delete vocabs_;
        vocabs_ = nullptr;  // guard against dangling use after destruction
    }

    // Inserts `word` into the byte-trie and assigns it the next vocabulary
    // index (its position in words_). Re-inserting an existing word overwrites
    // its index and appends a duplicate entry, matching the original behavior.
    //
    // Fix: the old code did a find() followed by an operator[] assignment for
    // every byte — operator[] already default-constructs a missing child, so a
    // single access suffices. The terminal-node bookkeeping is also hoisted
    // out of the loop.
    void Tokenizer::Insert(string word) {
        Node *tmp = vocabs_;
        for (size_t i = 0; i < word.length(); i++) {
            int8_t ch = static_cast<int8_t>(word[i]);
            tmp = &(tmp->sons[ch]);  // creates the child if absent
        }
        if (tmp != vocabs_) {  // empty word: never re-index the root node
            tmp->index = static_cast<int32_t>(words_.size());
            words_.push_back(word);
        }
    }

    // Loads a newline-separated vocabulary from a raw buffer: each non-empty
    // line becomes one trie entry via Insert().
    //
    // BUG FIX: the original dropped the final word whenever the buffer did not
    // end with '\n'; the trailing label is now flushed after the loop.
    // NOTE(review): lines ending "\r\n" keep the '\r' inside the stored word —
    // confirm vocab files are LF-only.
    void Tokenizer::LoadVocab(const char *labelData, size_t labelLength) {
        string label;
        for (size_t i = 0; i < labelLength; i++) {
            if (labelData[i] != '\n') {
                label += labelData[i];
                continue;
            }
            if (!label.empty()) {
                Insert(label);
            }
            label.clear();
        }
        if (!label.empty()) {  // flush the last line (no trailing newline)
            Insert(label);
        }
    }

    // Looks up `word` in the trie; returns its vocabulary index, or unk_id_
    // when the word is absent (including the empty string, and prefixes that
    // exist only as interior nodes, i.e. index == -1).
    //
    // Fix: the old code performed two map lookups per byte (find() followed by
    // operator[]); the iterator returned by find() is now reused.
    int Tokenizer::Find(string word) {
        if (word.empty()) {
            return unk_id_;
        }
        Node *node = vocabs_;
        for (size_t i = 0; i < word.length(); i++) {
            auto it = node->sons.find(static_cast<int8_t>(word[i]));
            if (it == node->sons.end()) {
                return unk_id_;  // dead end: no such edge
            }
            node = &it->second;
        }
        // Fully matched: valid only if the final node is a terminal.
        return node->index == -1 ? unk_id_ : node->index;
    }

    // Returns true when `node` has a child edge labeled `ch`.
    bool Tokenizer::Find(int8_t ch, Node *node) {
        return node->sons.count(ch) > 0;
    }

    // Returns the byte length (1-4) of a UTF-8 sequence given its lead byte.
    // Continuation or otherwise malformed lead bytes fall back to 1 so the
    // caller always makes forward progress.
    int get_utf8_char_length(char first_byte) {
        const unsigned char lead = static_cast<unsigned char>(first_byte);
        if (lead < 0x80) {
            return 1;  // ASCII
        }
        if ((lead & 0xE0) == 0xC0) {
            return 2;  // 110xxxxx
        }
        if ((lead & 0xF0) == 0xE0) {
            return 3;  // 1110xxxx (covers CJK)
        }
        if ((lead & 0xF8) == 0xF0) {
            return 4;  // 11110xxx
        }
        return 1;  // continuation byte / invalid lead
    }

    // Splits a multi-byte (e.g. CJK) token into single UTF-8 characters and
    // looks each one up individually. For every character it appends one id to
    // inputs_id, one surface form to tokens ("[UNK]" when unknown), and a
    // flattened [start, end) character-offset pair to offset. `point` (the
    // running character offset in the full input) is advanced past the token
    // on normal exit; when the 127-id cap trips it is left unchanged, exactly
    // as in the original.
    void Tokenizer::CHWordDeal(string token, vector<int> &inputs_id, vector<string> &tokens,
                               vector<int> &offset, int &point) {
        int cursor = point;
        string rest = token;
        while (!rest.empty()) {
            const int charLen = get_utf8_char_length(rest[0]);
            const int id = Find(rest.substr(0, charLen));
            inputs_id.push_back(id);  // Find already yields unk_id_ on miss
            tokens.push_back(id == unk_id_ ? string("[UNK]") : words_[id]);
            offset.push_back(cursor);
            cursor++;
            offset.push_back(cursor);
            if (static_cast<size_t>(charLen) >= rest.length()) {
                // Consumed the whole token.
                point = cursor;
                return;
            }
            rest = rest.substr(charLen);
            if (inputs_id.size() >= 127) {
                return;  // cap reached: point intentionally not updated
            }
        }
        point = cursor;
    }

    // Greedy longest-prefix (WordPiece-style) split of a single-byte token.
    // Walks the trie byte by byte; whenever matching can no longer continue it
    // emits the longest vocabulary prefix seen so far (or "[UNK]"), rewrites
    // the remainder with a "##" continuation prefix, and restarts from the
    // trie root. Every emitted piece shares the same [start, end) offset pair,
    // which spans the whole original token. `point` is advanced past the
    // token on normal exit (not when the 127-id cap trips).
    void Tokenizer::ENWordDeal(string token, vector<int> &inputs_id, vector<string> &tokens,
                               vector<int> &offsets, int &point) {
        string wtmp = token;          // working string, rewritten on each restart
        const char *chs = wtmp.c_str();
        Node *tmp = vocabs_;          // current trie position
        Node *leafTmp = nullptr;      // deepest terminal node matched so far
        int start = 0;                // byte position just past leafTmp's match
        int index = -1;               // vocab index at the current trie node
        int i = 0;
        int offsetStart = point;
        int offsetEnd = point + token.length();
        while (wtmp.length() > 0 && i < wtmp.length()) {
            int8_t ch = static_cast<int8_t>(chs[i]); // use tolower(chs[i]) here if uppercase letters should be accepted
            if (!Find(ch, tmp)) {
                // No edge for this byte: flush what has been matched so far.
                if (i == 0) {
                    // Not even the first byte matches: the token is unknown.
                    inputs_id.push_back(unk_id_);
                    tokens.push_back("[UNK]");
                    offsets.push_back(offsetStart);
                    offsets.push_back(offsetEnd);
                    return;
                } else if (index == -1) {
                    // Current node is interior; fall back to the last terminal.
                    if (leafTmp) {
                        offsets.push_back(offsetStart);
                        offsets.push_back(offsetEnd);
                        inputs_id.push_back(leafTmp->index);
                        tokens.push_back(words_[leafTmp->index]);
                        wtmp = "##" + wtmp.substr(start);
                    } else {
                        // No prefix ever completed a vocab entry.
                        offsets.push_back(offsetStart);
                        offsets.push_back(offsetEnd);
                        inputs_id.push_back(unk_id_);
                        tokens.push_back("[UNK]");
                        return;
                    }
                } else {
                    // Current node is itself a terminal: emit it directly.
                    offsets.push_back(offsetStart);
                    offsets.push_back(offsetEnd);
                    inputs_id.push_back(index);
                    tokens.push_back(words_[index]);
                    wtmp = "##" + wtmp.substr(i);
                }
                // Restart matching from the trie root on the "##"-prefixed rest
                // (i becomes 0 after the i++ below).
                i = -1;
                tmp = vocabs_;
                index = -1;
                chs = wtmp.c_str();
                leafTmp = nullptr;
                start = 0;
            } else {
                tmp = &(tmp->sons[ch]);
                if (tmp->index > -1) {
                    leafTmp = tmp;  // remember the deepest terminal matched so far
                    start = i + 1;  // and the byte position just past its match
                    if (i == wtmp.size() - 1) {
                        // Whole remainder matched a vocab entry: emit and stop.
                        offsets.push_back(offsetStart);
                        offsets.push_back(offsetEnd);
                        inputs_id.push_back(tmp->index);
                        tokens.push_back(words_[tmp->index]);
                        wtmp = "";
                    }
                } else {
                    if (i == wtmp.size() - 1) {
                        // Ran out of input on an interior node.
                        if (leafTmp) {
                            // Emit the last terminal and restart on the rest.
                            offsets.push_back(offsetStart);
                            offsets.push_back(offsetEnd);
                            inputs_id.push_back(leafTmp->index);
                            tokens.push_back(words_[leafTmp->index]);
                            wtmp = "##" + wtmp.substr(start);
                            i = -1;
                            tmp = vocabs_;
                            index = -1;
                            chs = wtmp.c_str();
                            leafTmp = nullptr;
                            start = 0;
                            chs = wtmp.c_str();  // NOTE(review): duplicate of the assignment two lines up
                        } else {
                            // Nothing matched at all: unknown token.
                            offsets.push_back(offsetStart);
                            offsets.push_back(offsetEnd);
                            inputs_id.push_back(unk_id_);
                            tokens.push_back("[UNK]");
                            wtmp = "";
                        }
                    }
                }
                index = tmp->index;
            }
            i++;
            if (inputs_id.size() >= 127) {
                return;  // cap reached: point intentionally not updated
            }
        }
        point = offsetEnd;
    }

    // Tokenizes `text` into BERT-style ids: [CLS] body... [SEP].
    // Body-token ids go to inputs_id; surface forms go to tokens and flattened
    // [start, end) character-offset pairs go to offset (CLS/SEP get neither a
    // token string nor an offset pair, matching the original contract).
    //
    // BUG FIX: a whitespace run matched by the regex can be longer than one
    // character ("  ", " \t"); the original advanced `start` by only 1, which
    // desynchronized every subsequent offset pair. The whole run is skipped now.
    void Tokenizer::EncodeText(const std::string &text, vector<int> &inputs_id,
                               vector<string> &tokens, vector<int> &offset) {
        // Alternatives (first match wins): whitespace runs, "<letter>ü" pairs,
        // letter runs, digit runs, punctuation runs, newline runs.
        // NOTE(review): std::regex is slow; consider a hand-written scanner if
        // this ever shows up in profiles.
        std::regex re(
                R"(\s+|[a-zA-Z]ü|[[:alpha:]]+|[[:digit:]]+|[^\s[:alpha:][:digit:]]+[\r\n]*|\s*[\r\n]+|\s+(?!\S))");
        string token;
        smatch match;
        string input = text;
        inputs_id.push_back(cls_id_);
        int start = 0;  // running character offset in the original text
        while (regex_search(input, match, re)) {
            token = match.str(0);
            input = match.suffix().str();
            const char *chs = token.c_str();
            if (chs[0] == ' ') {
                start += static_cast<int>(token.length());  // skip the whole whitespace run
                continue;
            }
            // NOTE(review): runs starting with '\t' or '\n' fall through to
            // ENWordDeal (typically producing [UNK]) — confirm this is intended.
            int chOrEn = get_utf8_char_length(chs[0]);
            if (chOrEn == 1) {
                // Single-byte lead: ASCII word / number / punctuation.
                ENWordDeal(token, inputs_id, tokens, offset, start);
            } else {
                // Multi-byte lead: split per UTF-8 character (e.g. CJK).
                CHWordDeal(token, inputs_id, tokens, offset, start);
            }
        }
        inputs_id.push_back(sep_id_);
    }
}