#include "keyword_processor.hpp"
#include "directory_scanner.hpp"

#include "external/utfcpp/utf8.h"

#include <algorithm>
#include <cctype>
#include <fstream>
#include <iostream>
#include <sstream>


// Construct the processor: initialize the cppjieba tokenizer with its
// bundled dictionary/model files, then load the English and Chinese
// stop-word lists used later to filter tokens.
// NOTE(review): all paths are relative to the working directory — the
// program presumably must be launched from the project root; confirm.
KeyWordProcessor::KeyWordProcessor()
    : m_tokenizer("resources/dictionaries/jieba.dict.utf8",
                  "resources/dictionaries/hmm_model.utf8",
                  "resources/dictionaries/user.dict.utf8",
                  "resources/dictionaries/idf.utf8",
                  "resources/dictionaries/stop_words.utf8") {
    // Load stop-word tables (failure is logged, not fatal — see load_stop_words)
    load_stop_words("resources/corpus/stopwords/en_stopwords.txt", m_enStopWords);
    load_stop_words("resources/corpus/stopwords/cn_stopwords.txt", m_chStopWords);
}


// Read one stop word per line from `filename` into `stopWords`.
// Each line is trimmed of surrounding whitespace (spaces, tabs, CR/LF —
// so files with Windows line endings work too); blank lines are skipped.
// On open failure the error is logged and the set is left untouched.
void KeyWordProcessor::load_stop_words(const std::string& filename, std::set<std::string>& stopWords) {
    std::ifstream file(filename);
    if (!file.is_open()) {
        std::cerr << "无法打开停用词文件：" << filename << std::endl;
        return;
    }

    const std::string kWhitespace = " \t\r\n";
    std::string line;
    while (std::getline(file, line)) {
        const size_t first = line.find_first_not_of(kWhitespace);
        if (first == std::string::npos) {
            continue;  // whitespace-only / empty line
        }
        const size_t last = line.find_last_not_of(kWhitespace);
        stopWords.insert(line.substr(first, last - first + 1));
    }
}


// Slurp the whole file at `filename` into a string.
// Returns an empty string (and logs to stderr) when the file cannot be
// opened — callers cannot distinguish "missing file" from "empty file".
std::string KeyWordProcessor::read_file(const std::string& filename) {
    std::ifstream in(filename);
    if (!in.is_open()) {
        std::cerr << "无法打开文件：" << filename << std::endl;
        return "";
    }

    // rdbuf() streams the entire file content in one shot.
    std::ostringstream contents;
    contents << in.rdbuf();
    return contents.str();
}


// Normalize English text for tokenization: keep only alphabetic
// characters (lower-cased) and whitespace; drop digits, punctuation and
// any non-ASCII bytes.
//
// FIX: the previous version passed a plain (signed) char straight to
// isalpha/isspace/tolower — undefined behavior for negative values,
// which any non-ASCII byte (e.g. UTF-8 data) produces. Cast through
// unsigned char first, as the <cctype> contract requires.
std::string KeyWordProcessor::preprocess_english(const std::string& text) {
    std::string result;
    result.reserve(text.size());  // avoid repeated reallocation
    for (char c : text) {
        const unsigned char uc = static_cast<unsigned char>(c);
        if (std::isalpha(uc)) {
            result += static_cast<char>(std::tolower(uc));
        } else if (std::isspace(uc)) {
            result += c;
        }
    }
    return result;
}


// Build a Chinese word-frequency dictionary from every file under `dir`
// and write it to `outfile` as "<word> <count>" lines (sorted by word,
// since std::map keeps keys ordered).
//
// FIXES: check that the output file opened instead of silently writing
// nowhere; use '\n' instead of std::endl (endl flushes on every line);
// test word.empty() before the set lookup so the cheap check
// short-circuits the expensive one.
void KeyWordProcessor::create_cn_dict(const std::string& dir, const std::string& outfile) {
    std::vector<std::string> files = DirectoryScanner::scan(dir);

    std::map<std::string, int> wordCount;

    for (const std::string& file : files) {
        std::string content = read_file(file);

        // Tokenize with cppjieba.
        std::vector<std::string> words;
        m_tokenizer.Cut(content, words);

        // Count every non-empty token that is not a stop word.
        for (const std::string& word : words) {
            if (!word.empty() && m_chStopWords.find(word) == m_chStopWords.end()) {
                ++wordCount[word];
            }
        }
    }

    std::ofstream out(outfile);
    if (!out.is_open()) {
        std::cerr << "无法打开输出文件：" << outfile << std::endl;
        return;
    }
    for (const auto& pair : wordCount) {
        out << pair.first << ' ' << pair.second << '\n';
    }
}


void KeyWordProcessor::build_cn_index(const std::string& dict, const std::string& index) {
    std::ifstream dictFile(dict);
    if (!dictFile.is_open()) {
        std::cerr << "无法打开词典文件：" << dict << std::endl;
        return;
    }
    
    std::map<std::string, std::vector<std::string>> charIndex;
    std::string line;

    while (std::getline(dictFile, line)) {
        size_t spacePos = line.find(' ');
        if (spacePos != std::string::npos) {
            std::string word = line.substr(0, spacePos);

            if (!word.empty()) {
                const char* it = word.c_str();
                const char* end = word.c_str() + word.size();
                auto start = it;
                utf8::next(it, end);
                std::string firstChar(start, it);

                charIndex[firstChar].push_back(word);
            }
        }
    }
    
    // 写入索引文件
    std::ofstream indexFile(index);
    for (const auto& pair : charIndex) {
        indexFile << pair.first << " " << pair.second.size();
        for (const std::string& word : pair.second) {
            indexFile << " " << word;
        }

        indexFile << std::endl;
    }

}


// Build an English word-frequency dictionary from every file under `dir`
// and write it to `outfile` as "<word> <count>" lines (sorted by word).
//
// FIXES (mirroring create_cn_dict): check that the output file opened;
// use '\n' instead of std::endl so each line does not force a flush.
void KeyWordProcessor::create_en_dict(const std::string& dir, const std::string& outfile) {
    std::vector<std::string> files = DirectoryScanner::scan(dir);
    std::map<std::string, int> wordCount;

    for (const std::string& file : files) {
        std::string content = read_file(file);
        // Lower-case and strip everything except letters and whitespace.
        std::string processed = preprocess_english(content);

        // Whitespace-split into words; operator>> never yields empty tokens.
        std::stringstream ss(processed);
        std::string word;
        while (ss >> word) {
            if (m_enStopWords.find(word) == m_enStopWords.end()) {
                ++wordCount[word];
            }
        }
    }

    std::ofstream out(outfile);
    if (!out.is_open()) {
        std::cerr << "无法打开输出文件：" << outfile << std::endl;
        return;
    }
    for (const auto& pair : wordCount) {
        out << pair.first << ' ' << pair.second << '\n';
    }
}


// Build a first-letter index from the dictionary file `dict`
// (lines of "<word> <count>") and write it to `index` as
// "<letter> <n> <word1> ... <wordN>" lines. English words are ASCII
// after preprocess_english, so the first byte IS the first character.
//
// FIXES (mirroring build_cn_index): check that the index file opened;
// use '\n' instead of std::endl so each line does not force a flush.
void KeyWordProcessor::build_en_index(const std::string& dict, const std::string& index) {
    std::ifstream dictFile(dict);
    if (!dictFile.is_open()) {
        std::cerr << "无法打开词典文件：" << dict << std::endl;
        return;
    }

    std::map<char, std::vector<std::string>> charIndex;

    std::string line;
    while (std::getline(dictFile, line)) {
        size_t spacePos = line.find(' ');
        if (spacePos == std::string::npos) {
            continue;  // malformed line: no "<word> <count>" separator
        }
        std::string word = line.substr(0, spacePos);
        if (!word.empty()) {
            charIndex[word[0]].push_back(word);
        }
    }

    std::ofstream indexFile(index);
    if (!indexFile.is_open()) {
        std::cerr << "无法打开索引文件：" << index << std::endl;
        return;
    }
    for (const auto& pair : charIndex) {
        indexFile << pair.first << ' ' << pair.second.size();
        for (const std::string& word : pair.second) {
            indexFile << ' ' << word;
        }
        indexFile << '\n';
    }
}


// Full pipeline: for each language, build the frequency dictionary from
// its corpus directory, then derive the first-character index from that
// dictionary file. Output paths are fixed under resources/.
void KeyWordProcessor::process(const std::string& chDir, const std::string& enDir) {
    const std::string cnDict = "resources/dict/cn_dict.txt";
    const std::string enDict = "resources/dict/en_dict.txt";

    // Chinese corpus → dictionary → index
    create_cn_dict(chDir, cnDict);
    build_cn_index(cnDict, "resources/index/cn_index.txt");

    // English corpus → dictionary → index
    create_en_dict(enDir, enDict);
    build_en_index(enDict, "resources/index/en_index.txt");
}
