#include "PageLibPreProcessor.h"
#include <cmath>

PageLibPreProcessor::PageLibPreProcessor(SplitTool *cuttor)
    : _pageList(), _offsetLib(), _invertIndexLib(), _wordCutter(cuttor), _tfList(), _unique_word() {
    // Constructor: stores the word-segmentation tool (not owned) and
    // value-initializes the page list, offset library, inverted index,
    // and the TF/DF statistic containers used by the build pipeline.
}

void PageLibPreProcessor::cutReddundantPage() {
    // Deduplicate the raw page library.
    // Reads every page recorded in offset.dat out of ripepage.dat, drops
    // pages whose content hash was already seen, renumbers the survivors
    // with consecutive docids/offsets, feeds each kept page to the word
    // segmenter (TF/DF collection), and flushes results to disk in batches
    // via storeOnDisk().
    ifstream ifs_pages("/home/kyle/Project/WebDisk/data/ripepage.dat");
    ifstream ifs_offset("/home/kyle/Project/WebDisk/data/offset.dat");
    if (!ifs_offset.is_open() || !ifs_pages.is_open()) {
        std::cerr << "Unable to open one or both files\n";
        return;
    }

    // New (post-dedup) docid and byte offset being assigned.
    size_t start_id = 1;
    size_t start_pos = 0;

    unordered_set<size_t> seenHashes; // content hashes of pages kept so far
    int docid;
    int pos;
    int length;
    string webpage_content;
    while (ifs_offset >> docid >> pos >> length) {
        cout << docid << " " << pos << " " << length << "\n";

        // Skip obviously corrupt offset records instead of resizing a
        // string with a negative/zero length or seeking before the file.
        if (length <= 0 || pos < 0) {
            std::cerr << "Bad offset record for docid " << docid << ", skipped\n";
            continue;
        }

        // Read exactly `length` bytes of this page's content.
        webpage_content.clear();
        ifs_pages.clear(); // recover stream state after a possible short read
        ifs_pages.seekg(pos, std::ios::beg);
        webpage_content.resize(length);
        ifs_pages.read(&webpage_content[0], length);
        if (ifs_pages.gcount() != length) {
            // Short read: the offset record disagrees with the page file.
            // Hashing/storing a partially-filled buffer would corrupt the
            // new library, so drop this record.
            std::cerr << "Truncated page at offset " << pos << ", skipped\n";
            continue;
        }

        WebPage web(webpage_content);

        // Hash-based duplicate detection (O(1) per page; replaces the old
        // pairwise O(n^2) comparison).  insert().second is false when the
        // hash was already present.  NOTE(review): distinct pages whose
        // hashes collide are also dropped — acceptable for this pipeline.
        size_t hashValue = web.getHash();
        bool is_repeat = !seenHashes.insert(hashValue).second;
        if (is_repeat) {
            continue;
        }

        // Unique page: assign its new id/offset, keep it, and collect the
        // TF/DF statistics used later by buildInvertIndexMap().
        web.setDocid(start_id);
        _pageList.push_back(web);
        _offsetLib[start_id] = make_pair(start_pos, length);
        wordParticiple(web.getDoc());

        start_pos += length;
        ++start_id;

        if (_offsetLib.size() >= 512) {
            // Flush in batches to bound memory use and avoid one giant IO.
            storeOnDisk();
            _pageList.clear();
            _offsetLib.clear();
        }
    }

    // Persist whatever remains of the last (possibly partial) batch.
    if (!_pageList.empty()) {
        storeOnDisk();
        _pageList.clear();
        _offsetLib.clear();
    }

    ifs_pages.close();
    ifs_offset.close();
}

void PageLibPreProcessor::storeOnDisk() {
    // 存储更新好后的网页库和偏移库
    string ripepage = "/home/kyle/Project/WebDisk/data/newripepage.dat";
    string offset = "/home/kyle/Project/WebDisk/data/newoffset.dat";

    ofstream ofs_ripe(ripepage, std::ios::app);
    ofstream ofs_off(offset, std::ios::app);

    for (auto &page : _pageList) {
        ofs_ripe << page;
    }

    for (auto &pair : _offsetLib) {
        ofs_off << pair.first << " "
                << pair.second.first << " "
                << pair.second.second << "\n";
    }

    ofs_ripe.close();
    ofs_off.close();
}

void PageLibPreProcessor::wordParticiple(const string &content) {
    // 对文章进行分词
    map<string, int> _dict; // 当前文档词频
    set<string> wordsInDoc; // 用于记录当前文档中已经遇到的词
    vector<string> words = _wordCutter->cut(content);

    // 计算当前文档的词频
    for (auto &w : words) {
        ++_dict[w];

        // 只在单词首次出现时更新 _unique_word（DF）
        if (wordsInDoc.find(w) == wordsInDoc.end()) {
            ++_unique_word[w];    // 该词在当前文档出现过
            wordsInDoc.insert(w); // 标记该词在本篇文档中出现过
        }
    }

    // 将当前文档的词频加入 _tfList
    _tfList.push_back(_dict);
}

void PageLibPreProcessor::buildInvertIndexMap() {
    // Build the inverted index from the TF (_tfList) and DF (_unique_word)
    // statistics gathered by wordParticiple(): for each word, store the
    // (docid, L2-normalized TF-IDF weight) pairs of documents containing it.
    int N = _tfList.size(); // total number of (deduplicated) documents

    // IDF per word: log(N / (df + 1)).  The +1 guards against division by
    // zero; note this smoothed form can yield zero or negative values for
    // very common words (e.g. df == N - 1 gives idf == 0).
    unordered_map<string, double> idfValues;
    for (const auto &entry : _unique_word) {
        const string &word = entry.first;
        int df = entry.second; // number of documents containing the word
        idfValues[word] = log((double)N / (1 + df));
    }

    // For each document: compute raw TF-IDF weights, normalize by the
    // vector's L2 norm, then insert into the inverted index.
    int docid = 1;
    for (const auto &web : _tfList) {
        map<string, double> webFreq; // word -> raw TF-IDF for this document
        double norm = 0.0;           // accumulates the sum of squared weights

        for (const auto &entry : web) {
            const string &word = entry.first;
            int tf = entry.second;
            double tfidf = tf * idfValues[word]; // weight of word in this doc
            webFreq[word] = tfidf;
            norm += tfidf * tfidf;
        }

        norm = sqrt(norm);

        // Guard against a zero vector (every weight 0, possible with the
        // smoothed IDF above): dividing by zero would poison the index with
        // NaN/inf entries, so such documents contribute no postings.
        if (norm > 0.0) {
            for (const auto &entry : webFreq) {
                double w = entry.second / norm; // normalized weight
                _invertIndexLib[entry.first].insert(make_pair(docid, w));
            }
        }
        ++docid;
    }
}

void PageLibPreProcessor::storeIndexOnDisk() {
    // Serialize the finished inverted index: one line per word, formatted
    // as "word docid1 weight1 docid2 weight2 ... ".
    string InvertIndexTable = "/home/kyle/Project/WebDisk/data/InvertIndexTable.dat";

    ofstream ofs_IIT(InvertIndexTable);

    for (const auto &posting : _invertIndexLib) {
        ofs_IIT << posting.first << " ";
        for (const auto &docWeight : posting.second) {
            ofs_IIT << docWeight.first << " "
                    << docWeight.second << " ";
        }
        ofs_IIT << "\n";
    }

    ofs_IIT.close();
}