#include "PageLibPreprocessor.h"

PageLibPreProcessor::PageLibPreProcessor()
{
    // Member containers are default-constructed; the only setup work is
    // loading the configuration file into the Configuration singleton.
    Configuration &conf = Configuration::GetConfiguration();
    conf.ReadConfiguration("conf/myconf.conf");
}

// All members clean up via their own destructors (Rule of Zero).
PageLibPreProcessor::~PageLibPreProcessor() = default;

void PageLibPreProcessor::cutRedudantPage()
{
    // 读取配置文件
    Configuration &conf = Configuration::GetConfiguration();

    set<string> pageLibDict = conf._configuration["PageLibDict"];
    if (pageLibDict.empty())
    {
        std::cerr << "Error: Missing PageLibDict in configuration.\n";
        return;
    }

    vector<string> allFiles = conf.ReadFilesInDirectory(*pageLibDict.begin());
    _fingerprint.clear();

    // 遍历文件夹内的文件
    for (auto &filename : allFiles)
    {
        std::cerr << "Processing file: " << filename << "\n";

        vector<WebPage> items = _wordCutter.split_XML(filename); // 分割出来一个文件中所有<doc>
        if (items.empty())
        {
            std::cerr << "Error: No items parsed from file: " << filename << std::endl;
            continue;
        }

        Simhasher simhasher("dict/jieba.dict.utf8",
                            "dict/hmm_model.utf8",
                            "dict/idf.utf8",
                            "dict/stop_words.utf8");
        size_t topN = 5;

        // 生成指纹
        vector<uint64_t> currentFingerprint; // 将一个文件的所有<content>指纹保存到一个vector里
        for (auto &item : items)
        {
            if (item._link.empty())
            {
                std::cerr << "Warning: Empty link in WebPage. Skipping..." << std::endl;
                continue;
            }

            uint64_t u64 = 0;
            simhasher.make(item._content, topN, u64);
            currentFingerprint.push_back(u64);
        }

        // 去重逻辑
        for (size_t idx = 0; idx < currentFingerprint.size(); ++idx) // 指纹的vector下标和vector<WebPage>下标一致
        {
            bool isUnique = true;                     // 设置标志位，默认是独一无二的
            for (const auto &globalFp : _fingerprint) // 遍历指纹，找出重复的指纹
            {
                if (Simhasher::isEqual(currentFingerprint[idx], globalFp, 2))
                {
                    isUnique = false; // 重复就设置为false
                    break;
                }
            }

            if (isUnique) // 只有当指纹是独一无二的时候，才加入数据成员的网页库中
            {
                _fingerprint.push_back(currentFingerprint[idx]); // 独一无二的指纹库
                _pageList.push_back(items[idx]);                 // 独一无二的网页库
            }
        }
    }

    // 生成网页偏移库
    size_t pos = 0;

    for (size_t idx = 0; idx < _pageList.size(); ++idx)
    {
        const WebPage &page = _pageList[idx];

        string doc = "<doc>\n";
        doc += "    <title>" + page._title + "</title>\n";
        doc += "    <link>" + page._link + "</link>\n";
        doc += "    <content>" + page._content + "</content>\n";
        doc += "</doc>\n";

        size_t length = doc.size();

        _offsetLib.push_back({pos, length});

        pos += length;
    }
}

void PageLibPreProcessor::bulidInvertIndexMap()
{

    SplitToolCppJieba split;
    vector<string> allWords;
    map<string, set<int>> passageCount; // 统计某个单词出现在了哪些文章中
    for (size_t idx = 0; idx < _pageList.size(); ++idx)
    {
        // 分词、去掉停用词
        WebPage page = _pageList[idx];
        vector<string> onePageWords = split.cut(page._content); // 一篇文章中的所有单词

        map<string, int> wordFrequency; // 统计每个单词在这一篇文章的次数;
        for (size_t wordNum = 0; wordNum < onePageWords.size(); ++wordNum)
        {
            wordFrequency[onePageWords[wordNum]]++;          // 统计每个单词在这一篇文章的次数
            passageCount[onePageWords[wordNum]].insert(idx); // 统计这个单词出现在了这个文章中
        }

        for (auto word : wordFrequency)
        {
            // 某个单词  出现在<docid> = idx这篇文章中的    总次数
            _invertIndexLib[word.first].insert({idx, word.second});
        }

        allWords.insert(allWords.end(), onePageWords.begin(), onePageWords.end());
    }

    for (auto &ele : _invertIndexLib)
    {
        double DF = ele.second.size();
        // cerr << "DF = " << DF << " ";
        double N = _pageList.size();
        // cerr << "N = " << N << " ";
        double IDF = log2(N / (DF + 1));
        // cerr <<"IDF = " << IDF << "\n";

        set<pair<int, double>> newSet;
        for (auto &elem : ele.second)
        {
            // size_t docid = elem.first;
            double TF = elem.second;
            double w = TF * IDF;

            newSet.insert({elem.first, w});
            // wSet.push_back(w);
        }
        ele.second = move(newSet);
    }

    for (int docId = 0; docId < (int)_pageList.size(); ++docId)
    {
        double quadraticSum = 0.0;
        // 计算当前文档的权重平方和
        for (const auto &ele : _invertIndexLib)
        {
            for (auto &elem : ele.second)
            {
                if (elem.first == docId)
                {
                    quadraticSum += elem.second * elem.second;
                }
            }
        }
        quadraticSum = sqrt(quadraticSum);

        // 对文档的权重进行归一化
        // double num = 0.0;
        for (auto &ele : _invertIndexLib)
        {
            set<pair<int, double>> newSet;
            for (auto &elem : ele.second)
            {

                if (elem.first == docId)
                {
                    double w = elem.second / quadraticSum;
                    newSet.insert({docId, w});
                    // num += w;
                }
                else
                {
                    newSet.insert({elem.first, elem.second});
                }
            }
            ele.second = move(newSet);
        }
        // cerr << "num = " << num << "\n";
    }

    // //归一化完成，验证每篇文章的权重平方和是否为1
    // for (size_t docId = 0; docId < _pageList.size(); ++docId)
    // {
    //     double sum = 0.0;

    //     for (const auto &entry : _invertIndexLib)
    //     {
    //         for (auto &elem : entry.second)
    //         {

    //             if (elem.first == docId)
    //             {
    //                 sum += elem.second * elem.second;
    //             }
    //         }
    //     }

    //     std::cout << "Document " << docId << " normalization sum: " << sum << "\n";
    // }

}

void PageLibPreProcessor::storeOnDisk()
{
    // 读取配置文件
    Configuration &conf = Configuration::GetConfiguration();
    set<string> storeFileName = conf._configuration["StoreFileName"];
    set<string> storeOffersetLib = conf._configuration["StoreOffersetLib"];
    set<string> storeInvertIndex = conf._configuration["StoreInvertIndex"];
    if (storeFileName.empty())
    {
        cerr << "Error:Missing StoreFileName in configuration.\n";
        return;
    }
    if (storeOffersetLib.empty())
    {
        cerr << "Error:Missing StoreOffersetLib in configuration.\n";
        return;
    }
    if (storeInvertIndex.empty())
    {
        cerr << "Error:Missing StoreInvertIndex in configuration.\n";
        return;
    }

    // 存储去重后的网页
    _wordCutter.save_to_JSON(_pageList, *storeFileName.begin());

    // 存储网页偏移库
    ofstream ofs;
    ofs.open(*storeOffersetLib.begin());
    if (!ofs.is_open())
    {
        cerr << "打开文件失败: " << *storeOffersetLib.begin() << "\n";
        return;
    }
    for (size_t idx = 0; idx < _offsetLib.size(); ++idx)
    {
        ofs << idx << " " << _offsetLib[idx].first << " " << _offsetLib[idx].second << "\n";
    }

    // 存储倒排索引库
    ofstream ofsIndex;
    ofsIndex.open(*storeInvertIndex.begin());
    if (!ofsIndex.is_open())
    {
        cerr << "打开文件失败: " << *storeInvertIndex.begin() << "\n";
        return;
    }
    for (auto &ele : _invertIndexLib)
    {
        ofsIndex << ele.first << " ";
        for (auto &elem : ele.second)
        {
            ofsIndex << elem.first << " " << elem.second << " ";
        }
        ofsIndex << "\n";
    }

    // // 测试网页偏移量是否正确
    // ifstream ifs("data/newripepage.dat", std::ios::binary);
    // if (!ifs.is_open())
    // {
    //     std::cerr << "Error: Unable to open page library file: " << "data/newripepage.dat\n";
    //     return;
    // }
    // for (size_t idx = 0; idx < 3; ++idx)
    // {
    //     size_t docID = idx;
    //     size_t start = _offsetLib[idx].first;
    //     size_t length = _offsetLib[idx].second;

    //     // 定位到偏移量处
    //     ifs.seekg(start, std::ios::beg);

    //     // 读取网页内容
    //     std::string content(length, '\0');
    //     ifs.read(&content[0], length);

    //     // 输出验证信息
    //     std::cout << "Doc ID: " << docID << "\n"
    //               << "Expected Start: " << start << ", Length: " << length << "\n"
    //               << "Content:\n"
    //               << content << "\n"
    //               << "--------------------------\n";

    //     // 可以添加进一步的逻辑，比如和原内容比对
    // }
    // ifs.close();
}
