#include "PagePreprocess/PageLibPreprocessor.h"

#include <cmath>
#include <cstdio>
#include <cstdlib>

#include <fstream>
#include <sstream>

using std::ofstream;

PageLibPreprocessor::PageLibPreprocessor()
    : _simhasher("data/dict/jieba.dict.utf8",
                 "data/dict/hmm_model.utf8",
                 "data/dict/idf.utf8",
                 "data/dict/stop_words.utf8"),
      _jieba("data/dict/jieba.dict.utf8",
             "data/dict/hmm_model.utf8",
             "data/dict/user.dict.utf8",
             "data/dict/idf.utf8",
             "data/dict/stop_words.utf8"),
      _xmldoc()
{
    // Load the page offset libraries (id -> {offset, length}) produced by a
    // previous run; a missing file is logged inside LodoffsetLib.
    LodoffsetLib("data/offsetLib.dat", _offset);
    LodoffsetLib("data/newoffsetLib.dat", _newoffset);

    // Load the Chinese stop-word list used when building the inverted index.
    ifstream ifs_ch_stop("data/yuliao/stop_words_zh.txt");
    if (!ifs_ch_stop.is_open())
    {
        // BUG FIX: the old message blamed the wrong file (stop_words_eng.txt)
        // even though stop_words_zh.txt is what is opened above.
        LogError("stop_words_zh.txt open failed");
        return; // nothing to read; leave the stop-word set empty
    }
    string ch_word;
    while (ifs_ch_stop >> ch_word)
    {
        _stop_ch.insert(ch_word);
    }
}

// Rebuild the deduplicated page library: scan every page in the raw library,
// drop near-duplicates (simhash), and write the survivors plus a fresh
// offset library to disk.
void PageLibPreprocessor::Preprocessor()
{
    // Start from a clean slate: remove any output from a previous run.
    remove("data/newripepage.dat");
    remove("data/newoffsetLib.dat");
    // Walk every page id recorded in the original offset library.
    for (size_t id = 0; id < _offset.size(); ++id)
    {
        WebPage *webpage = LodPageLib(static_cast<int>(id), "data/ripepage.dat", _offset);
        if (webpage)
        {
            // Keep the page only if it is not a near-duplicate of one
            // already accepted.
            if (!compare(webpage))
            {
                store(webpage); // append record + remember its new offset
            }
            // BUG FIX: the duplicate branch used to `continue` without
            // freeing the page, leaking one WebPage per duplicate.
            delete webpage;
        }
    }
    store_newoffset();
}

// 从网页库中加载指定id的网页
// Load the page with the given id from a page library file.
// Returns a heap-allocated WebPage (caller owns it and must delete), or
// nullptr when the id is unknown or the record cannot be parsed.
WebPage *PageLibPreprocessor::LodPageLib(int id, const string &path, map<int, pair<int, int>> &off_set)
{
    // BUG FIX: the WebPage used to be allocated before this check and was
    // leaked on every early `return nullptr` below.
    auto pos = off_set.find(id);
    if (pos == off_set.end())
    {
        LogDebug("加载网页库,此id不存在:%d", id);
        return nullptr;
    }

    int offset = pos->second.first;
    int leng = pos->second.second;

    ifstream ifs(path, std::ios::binary);
    if (!ifs.is_open())
    {
        LogError("打开 %s 失败", path.c_str());
        exit(1);
    }
    ifs.seekg(offset);
    // One record is `leng` raw bytes of XML; NUL-terminate it for the parser.
    char *buffer = new char[leng + 1];
    ifs.read(buffer, leng);
    buffer[leng] = '\0';

    // tinyxml2's Parse(const char*) copies the text into the document, so the
    // buffer can be released immediately.
    // BUG FIX: `buffer` used to leak on the parse-error return below.
    _xmldoc.Parse(buffer);
    delete[] buffer;
    if (_xmldoc.ErrorID())
    {
        LogError("加载网页时,xml解析出错 id:%d", id);
        return nullptr;
    }

    WebPage *wbp = new WebPage();
    XMLElement *itemNode = _xmldoc.FirstChildElement("doc");
    if (itemNode)
    {
        // Each child element may be absent; guard the element pointer as well
        // as its text (BUG FIX: only GetText()'s result was checked before,
        // so a missing <docid>/<title>/... crashed on a null element).
        XMLElement *elem = itemNode->FirstChildElement("docid");
        const char *docid = elem ? elem->GetText() : nullptr;
        wbp->_id = docid ? atoi(docid) : 0;

        elem = itemNode->FirstChildElement("title");
        const char *title = elem ? elem->GetText() : nullptr;
        if (title)
        {
            wbp->_title = title;
        }
        elem = itemNode->FirstChildElement("link");
        const char *link = elem ? elem->GetText() : nullptr;
        if (link)
        {
            wbp->_link = link;
        }
        elem = itemNode->FirstChildElement("description");
        const char *des = elem ? elem->GetText() : nullptr;
        if (des)
        {
            wbp->_description = des;
        }
        // NOTE(review): the old code called NextSiblingElement("doc") here and
        // discarded the result; each record holds a single <doc>, so removed.
    }

    return wbp;
}

// 加载网页偏移库
// Load a page offset library into `off_set`.
// Each line has the form "id offset length".
void PageLibPreprocessor::LodoffsetLib(const string &path, map<int, pair<int, int>> &off_set)
{
    ifstream ifs(path);
    if (!ifs.is_open())
    {
        LogError("打开 %s 失败", path.c_str());
        return;
    }
    string line;
    int id, offset, leng;
    while (getline(ifs, line))
    {
        istringstream iss(line);
        // BUG FIX: only accept fully parsed lines; the old code inserted
        // whatever stale values the variables held when extraction failed
        // (e.g. on a blank or truncated line).
        if (iss >> id >> offset >> leng)
        {
            off_set[id] = {offset, leng};
        }
    }
}

// 查重
// Duplicate detection: returns true when the page's simhash fingerprint is
// within hamming distance 5 of a fingerprint already accepted, false when the
// page is new (its fingerprint is then remembered in _feature).
bool PageLibPreprocessor::compare(WebPage *web)
{
    if (web == nullptr)
    {
        return true; // treat a missing page as a duplicate so it gets skipped
    }

    // Fingerprint the description from its top-5 weighted keywords.
    const size_t topN = 5;
    uint64_t fingerprint = 0;
    _simhasher.make(web->_description, topN, fingerprint);

    // Linear scan over every previously accepted fingerprint.
    for (auto &entry : _feature)
    {
        if (Simhasher::isEqual(fingerprint, entry.second, 5))
        {
            LogDebug("网页 %d 和 %d 重复", web->_id, entry.first);
            return true;
        }
    }

    // First occurrence: remember the fingerprint and accept the page.
    _feature[web->_id] = fingerprint;
    return false;
}

// Append one accepted page to newripepage.dat (same XML record layout as the
// raw library) and record its {offset, length} in _newoffset.
void PageLibPreprocessor::store(WebPage *web)
{
    // Running byte offset into newripepage.dat, carried across calls: every
    // call appends, so the next record starts where the previous one ended.
    static int offsize = 0;

    ofstream ofs("data/newripepage.dat", std::ios::app);
    if (!ofs.is_open())
    {
        LogError("打开newripepage失败");
        return;
    }

    // Serialize the page back to an XML <doc> record.
    ostringstream record;
    record << "<doc>\n\t<docid>" << web->_id
           << "</docid>\n\t<title>" << web->_title
           << "</title>\n\t<link>" << web->_link
           << "</link>\n\t<description>" << web->_description << "</description>\n</doc>"
           << '\n';

    const string text = record.str();
    // Remember where this record lives in the new library.
    _newoffset[web->_id] = {offsize, static_cast<int>(text.size())};
    offsize += text.size();

    ofs << text;
}

// Persist the new offset library: one "id offset length" line per page.
void PageLibPreprocessor::store_newoffset()
{
    ofstream ofs("data/newoffsetLib.dat");
    if (!ofs)
    {
        LogError("open data/newoffsetLib.dat fail!");
        return;
    }
    for (const auto &entry : _newoffset)
    {
        ofs << entry.first << " "
            << entry.second.first << " "
            << entry.second.second << "\n";
    }
}

// 生成倒排索引
void PageLibPreprocessor::bulidInvertIndex()
{
    remove("data/invertIndex.dat");
    // 遍历所有文章
    WebPage *wbp;
    auto it = _newoffset.rbegin();
    int maxid = it->first + 1;

    for (int id = 0; id < maxid; ++id)
    {
        wbp = LodPageLib(id, "data/newripepage.dat", _newoffset);
        if (wbp)
        {
            // 使用结巴分词
            vector<string> words;
            string desc = wbp->_description + wbp->_title;
            _jieba.Cut(desc, words, true);
            // 创建单词索引
            for (auto &word : words)
            {
                if (_stop_ch.find(word) == _stop_ch.end())
                {
                    ++_wordfrequency[id][word];
                    ++_invertIndexLib[word][id];
                }
            }
        }
        delete wbp;
    }

    for (auto &it : _wordfrequency)
    {   
        int id = it.first;
        int TF, DF;
        double IDF, w, sum;
        LogDebug("计算权重 id:%d", id);
        for (auto &ch : it.second)
        {
            // 使用TF/IDF算法
            string word = ch.first;
            TF = ch.second;                    // 单词在一个文章中出现的次数
            DF = _invertIndexLib[word].size(); // 包含该单词的文章数
            IDF = log2(_newoffset.size() / (DF + 1));
            w = TF * IDF;
            sum += pow(w,2);
            _invertIndexLib[word][id] = w;
        }
        _sum_wight[id] = sum;
        //再遍历一遍，将权重进行归一化处理
        for (auto &ch : it.second)
        {
            // 使用TF/IDF算法
            string word = ch.first;
            w = _invertIndexLib[word][id];
            _invertIndexLib[word][id] = w/sqrt(sum);
        }
    }

    //存放倒排索引
    LogInfo("将倒排索引存入invertIndex.dat");
    ofstream ofs("data/invertIndex.dat");
    for(auto & it :_invertIndexLib){
        ofs<<it.first;
        for(auto & ch : it.second){
            ofs<<" "<<ch.first<<" "<<ch.second;
        }
        ofs<<"\n";
    }
}