#include <unistd.h>
#include <cctype>
#include <filesystem>
#include <iostream>
#include <fstream>
#include <sstream>
#include "../include/KeyWordProcessor.h"
#include "../include/DirectoryScanner.h"

using std::string;
using std::set;
using std::cout;
using std::endl;
using std::vector;
using std::ofstream;
using std::ifstream;
using std::istringstream;
using std::map;
using std::isalpha;
using std::isupper;
using namespace std::filesystem;

// Output locations for the generated dictionary and index files.
// All paths are relative to the directory the binary is run from
// (expected layout: a sibling "libfile" directory).
const string en_dict = "../libfile/en_dict.dat";
const string en_index = "../libfile/en_index.dat";
const string cn_dict = "../libfile/cn_dict.dat";
const string cn_index = "../libfile/cn_index.dat";


// Debug helper: stream every element of a container to stdout, one per line.
template<class T>
static void print(T t)
{
    for (auto it = t.begin(); it != t.end(); ++it)
    {
        std::cout << *it << std::endl;
    }
}

// Explicit specialization for word-frequency tables: prints each entry
// as "<word> <frequency>", one pair per line.
template<>
void print<std::map<std::string, int>>(std::map<std::string, int> m1)
{
    for (auto it = m1.cbegin(); it != m1.cend(); ++it)
    {
        std::cout << it->first << " " << it->second << std::endl;
    }
}

// Returns true when `codePoint` lies in one of the Unicode CJK ideograph
// blocks (the Unified Repertoire plus extensions A-G).
//
// Bounds are inclusive: the previous strict comparisons (`>` / `<`)
// wrongly rejected the first and last code point of every block — most
// notably U+4E00 (一), one of the most frequent Chinese characters.
inline bool chinese_word(uint64_t codePoint)
{
    return (codePoint >= 0x4E00  && codePoint <= 0x9FFF)  ||  // CJK Unified Ideographs
           (codePoint >= 0x3400  && codePoint <= 0x4DBF)  ||  // Extension A
           (codePoint >= 0x20000 && codePoint <= 0x2A6DF) ||  // Extension B
           (codePoint >= 0x2A700 && codePoint <= 0x2B73F) ||  // Extension C
           (codePoint >= 0x2B740 && codePoint <= 0x2B81F) ||  // Extension D
           (codePoint >= 0x2B820 && codePoint <= 0x2CEAF) ||  // Extension E
           (codePoint >= 0x2CEB0 && codePoint <= 0x2EBEF) ||  // Extension F
           (codePoint >= 0x30000 && codePoint <= 0x3134F);    // Extension G
}

// Construct a processor with empty frequency tables and load the default
// Chinese/English stop-word lists from the corpus directory.
KeyWordProcessor::KeyWordProcessor()
    : m_tokenizer()
    , m_cnWordFrequency()
    , m_enWordFrequency()
{
    // Stop-word paths are fixed relative to the working directory.
    setCnStopWords("../corpus/stopwords/cn_stopwords.txt");
    setEnStopWords("../corpus/stopwords/en_stopwords.txt");
}

void KeyWordProcessor::setCnStopWords(const string& path)
{
    ifstream ifs{path};
    string line;
    while(getline(ifs, line))
    {
        istringstream iss{line};
        string word;
        while(iss >> word)
        {
            m_cnStopWords.insert(word);
        }
    }
    ifs.close(); 
}

void KeyWordProcessor::setEnStopWords(const string& path)
{
    ifstream ifs{path};
    string line;
    while(getline(ifs, line))
    {
        istringstream iss{line};
        string word;
        while(iss >> word)
        {
            m_enStopWords.insert(word);
        }
    }
    ifs.close(); 
}

// Tally Chinese word frequencies across every file listed in `corpus`.
// Each line is segmented by the tokenizer; a token is counted only when
// its leading UTF-8 code point is a CJK ideograph and it is not a stop
// word. Results accumulate into m_cnWordFrequency.
void KeyWordProcessor::getCnWordFrequency(const vector<string> &corpus)
{
    for (const auto &fileName : corpus)   // const ref: no per-file string copy
    {
        ifstream ifs{fileName};
        string line;
        vector<string> words;             // hoisted: reuse capacity across lines
        while (getline(ifs, line))
        {
            // Defensive: drop tokens from the previous line so they are not
            // recounted if Cut() appends rather than overwrites.
            // NOTE(review): confirm Cut()'s output semantics against the
            // tokenizer's documentation.
            words.clear();
            m_tokenizer.Cut(line, words);
            for (const auto &token : words)
            {
                // Decode the token's first code point to classify it.
                // NOTE(review): multi-character tokens are classified by their
                // leading character only — confirm that is intended.
                auto it = utf8::iterator
                {
                    token.begin(),
                    token.begin(),
                    token.end()
                };
                char32_t codePoint = *it;

                if (!chinese_word(codePoint))
                {
                    continue;             // not a CJK ideograph: skip
                }
                if (m_cnStopWords.find(token) == m_cnStopWords.end())
                {
                    ++m_cnWordFrequency[token];
                }
            }
        }
        // ifstream closes automatically (RAII); explicit close() was redundant.
    }
}

void KeyWordProcessor::getEnWordFrequency(const vector<string> &corpus)
{
    // 读入文件，一行一行读取，按数字，标点符号，空白字符进行分词
    // 一个map<string, int>记录单词出现次数
    for(auto fileIt = corpus.begin(); fileIt != corpus.end(); ++fileIt)
    {
        string fileName = *fileIt;
        ifstream ifs{fileName};
        string line;
        while(getline(ifs, line))
        {
            istringstream iss{line};
            string noParticialWord;

            // 遍历一行，以空白字符分割，筛选出所有包含数字和标点的'词'
            while(iss >> noParticialWord)
            {
                // 遍历一个单词，以非英文字符进行分割
                string word;
                for(auto &c : noParticialWord)
                {
                    if(isalpha(c))
                    {
                        if(isupper(c))
                        {
                            c = std::tolower(c);
                        }
                        word.push_back(c);
                    }else if(!word.empty())
                    {
                        // 过滤停用词
                        if(m_enStopWords.find(word) == m_enStopWords.end())
                        {
                            ++m_enWordFrequency[word];
                        }
                        word.clear();
                    }
                }
                if(!word.empty())
                {
                    if(m_enStopWords.find(word) == m_enStopWords.end())
                    {
                        ++m_enWordFrequency[word];
                    }
                }
            }
        }
        ifs.close();
    }
}

void KeyWordProcessor::process(const string &cnDir, const string &enDir)
{
    vector<string>cnCorpus = DirectoryScanner::scan(cnDir);
    vector<string>enCorpus = DirectoryScanner::scan(enDir);

    getCnWordFrequency(cnCorpus);
    create_cn_dict(cn_dict);
    build_cn_index(cn_dict, cn_index);

    getEnWordFrequency(enCorpus);
    create_en_dict(en_dict);
    build_en_index(en_dict, en_index);

}

// Persist the Chinese frequency table to `path`, one "<word> <count>"
// entry per line (map iteration order: sorted by word).
void KeyWordProcessor::create_cn_dict(const string& path)
{
    ofstream ofs{path};
    for (const auto &[word, frequency] : m_cnWordFrequency)
    {
        // '\n' instead of endl: identical output bytes without forcing a
        // stream flush on every line.
        ofs << word << " " << frequency << '\n';
    }
    // ofstream flushes and closes on destruction (RAII).
}

void KeyWordProcessor::build_cn_index(const string& dict, const string& index)
{
    ifstream ifs{dict};
    string line;
    int count = 1;
    while(getline(ifs, line))
    {
        istringstream iss{line};
        string word;
        iss >> word;
        auto it = word.begin();
        auto end = word.begin() + word.size();

        while(it != end)
        {
            auto start = it;
            utf8::next(it, end);

            string alpha{start, it};
            m_cnAlphaIndex[alpha].insert(count);
        }
        ++count;
    }
    ofstream ofs{index};
    for(const auto &[alpha, row] : m_cnAlphaIndex)
    {
        ofs << alpha << " ";
        for(const auto &rowNum : row)
        {
            ofs << rowNum << " ";
        }
        ofs << endl;
    }
    ifs.close();
    ofs.close(); 
}

// Persist the English frequency table to `path`, one "<word> <count>"
// entry per line (map iteration order: sorted by word).
void KeyWordProcessor::create_en_dict(const string& path)
{
    ofstream ofs{path};
    for (const auto &[word, frequency] : m_enWordFrequency)
    {
        // '\n' instead of endl: identical output bytes without forcing a
        // stream flush on every line.
        ofs << word << " " << frequency << '\n';
    }
    // ofstream flushes and closes on destruction (RAII).
}

// Build the English letter index: map every character of every dictionary
// word to the set of 1-based dictionary rows it appears on, then write the
// index as "<char> <row> <row> ..." lines.
// NOTE(review): m_enAlphaIndex is never cleared here, so repeated calls
// accumulate stale entries — confirm process() runs this only once.
void KeyWordProcessor::build_en_index(const string& dict, const string& index)
{
    {
        ifstream ifs{dict};
        string line;
        int row = 1;
        while (getline(ifs, line))
        {
            istringstream iss{line};
            string word;
            iss >> word;                 // first token is the word; count ignored
            for (const char c : word)
            {
                m_enAlphaIndex[c].insert(row);
            }
            ++row;
        }
        // ifstream closes automatically at end of scope (RAII).
    }

    ofstream ofs{index};
    for (const auto &[alpha, rows] : m_enAlphaIndex)
    {
        ofs << alpha << " ";
        for (const auto rowNum : rows)
        {
            ofs << rowNum << " ";
        }
        ofs << '\n';                     // '\n', not endl: no per-line flush
    }
    // ofstream flushes and closes on destruction (RAII).
}