#include "../../include/PageLib/PageLibPreprocess.h"
#include "../../include/Configuration.h"

#include <iostream>
#include <cmath>
using std::cerr;
using std::cout;
using std::endl;

/// Construct the preprocessor and wire up its collaborators
/// (directory scanner, simhash de-duplicator, word splitter).
/// @param conf shared configuration passed to every sub-component.
/// NOTE(review): _cutTool is allocated with raw `new` and the destructor
/// below does not delete it — confirm _cutTool is a smart pointer in the
/// header, otherwise this leaks the split tool.
PageLibPreprocess::PageLibPreprocess(Configuration *conf)
:_dirSacnner(conf)
,_simHasher(conf)
,_cutTool(new SplitToolCppJieba(conf))
{
    cout << "PageLibPreprocess" << endl;
    // Reserve capacity up front so early push_backs don't reallocate
    // (original note: "reserve space to prevent a crash").
    _pageList.reserve(100);
    
}

/// Logs destruction.
/// NOTE(review): does not delete _cutTool (new'ed in the constructor) —
/// verify ownership is held by a smart pointer, otherwise this leaks.
PageLibPreprocess::~PageLibPreprocess()
{
    cout << "~PageLibPreprocess" << endl;
}

/*-------------此处是生成网页库的逻辑--------------*/
void PageLibPreprocess::createPageList()
{
    vector<string> filepath = _dirSacnner.getFilePath();
    for(auto path:filepath)
    {
        RssParse(path);
    }

    /*此处进行文本去重*/
    //存储去重后的网页
    unordered_map<int, WebPage> pagelist = _simHasher.RemoveDulPage();
    //因为去重后的网页id是乱的，需要重新设置id
    int id = 1;
    for (auto &pairPage : pagelist)
    {
        //将去重后的网页加入网页库容器
        pairPage.second._docId = id;
        _pageList.push_back(pairPage.second);
    }
}
/// Return the text content of the child element `tagName` under `element`,
/// or "" when the parent is null, the tag is missing, or the tag is empty.
/// Serves RssParse.
string getXmlElementText(XMLElement* element,const string& tagName)
{
    if (!element)
    {
        return "";
    }
    XMLElement *childElement = element->FirstChildElement(tagName.c_str());
    // BUG FIX: GetText() returns nullptr for an empty element such as
    // <title/>; constructing std::string from nullptr is undefined
    // behavior, so both pointers must be checked.
    if (childElement && childElement->GetText())
    {
        return childElement->GetText();
    }
    return "";
}

void PageLibPreprocess::RssParse(const string &filename)
{
    XMLDocument doc;
    if(doc.LoadFile(filename.c_str())!=XML_SUCCESS)
    {
        //日志
        cerr<<"Failed to load RSS file: " << filename << endl;
        exit(1);
    }

     XMLElement* channel = doc.FirstChildElement("rss")->FirstChildElement("channel");
    if (!channel)
    {
        //日志
        cerr << "Invalid RSS format." << endl;
        exit(1);
    }

    XMLElement* item = channel->FirstChildElement("item");
    int id = 1;
    while (item)
    {
        
        WebPage webpage;
        webpage._docId = id;
        id++;
        webpage._docTitle = getXmlElementText(item, "title");
        webpage._docUrl = getXmlElementText(item, "link");

        string description = getXmlElementText(item, "description");
        string content = getXmlElementText(item, "content");

        //description和content取最长的为内容
        string capacity = content.size() > description.size() ? content : description;
        
        //去除换行符
        
        // 正则表达式，去除html符号
        std::regex htmlRegex("<[^>]+>");
        string s1 = std::regex_replace(capacity, htmlRegex, "");
        
        std::regex newlinesRegex("[\r\n]+|[\u0021-\u002f\u003a-\u0040\u005b-\u0060\u007b-\u007e]+");
        webpage._docContent = std::regex_replace(s1, newlinesRegex, "");
        _initPageList.push_back(webpage);
        //将网页加入哈希去重对象
        _simHasher.addPage(webpage._docId, webpage);

        item = item->NextSiblingElement("item");
    }
}


/// Accessor for the de-duplicated page list (filled by createPageList).
/// Returns a mutable reference — callers share the internal container.
vector<WebPage>& PageLibPreprocess::getPageList()
{
    return _pageList;
}

/// Serialize the de-duplicated page list to `filepath` as a sequence of
/// <doc>...</doc> records.  The exact byte layout here MUST stay in sync
/// with createOffsetLib(), which re-renders the same format to compute
/// per-document offsets.  Exits on failure to open the output file.
void PageLibPreprocess::storePageLib(const string& filepath)
{
    ofstream outfile(filepath);
    if (!outfile.is_open()) {
            cerr << "Failed to open output file: " << filepath << endl;
            exit(1);
    }

    // size_t index: avoids signed/unsigned comparison with size().
    // '\n' instead of endl: identical bytes, no flush per line.
    for (size_t i = 0; i < _pageList.size(); ++i) {
        outfile << "<doc>" << '\n';
        outfile << "\t<docid> " << i + 1 << " </docid>" << '\n';
        outfile << "\t<title> " << _pageList[i]._docTitle << " </title>" << '\n';
        outfile << "\t<link> " << _pageList[i]._docUrl << " </link>" << '\n';
        outfile << "\t<content> " << _pageList[i]._docContent << " </content>" << '\n';
        outfile << "</doc>" << '\n';
    }

    outfile.close();
}
/*----------------生成网页库的逻辑的底线！！！----------------*/


/*-----------------生成网页偏移库-------------------*/
/// Build the offset library: for each page, record {byte offset, byte
/// length} of its record within the page-library file, keyed by 1-based
/// doc id.  Lengths are measured by re-rendering the exact text that
/// storePageLib() writes — the two formats MUST stay in sync (note the
/// spaces padding every tag and value).
void PageLibPreprocess::createOffsetLib()
{
    // Running byte offset of the current record within the page file.
    int offvalue=0;
    for (size_t i = 0; i < _pageList.size();++i)
    {
        // A fresh ostringstream per page measures one record's size.
        ostringstream oss;
        oss << "<doc>" << endl;
        oss<< "\t<docid> " << i + 1 << " </docid>" << endl;
        oss<< "\t<title> " << _pageList[i]._docTitle << " </title>" << endl;
        oss<< "\t<link> " << _pageList[i]._docUrl << " </link>" << endl;
        oss<< "\t<content> " << _pageList[i]._docContent << " </content>" << endl;
        oss<< "</doc>"<<endl;
        int length = static_cast<int>(oss.str().size());
        // (removed: oss.clear() — it resets stream *error flags*, not the
        // buffer, so it was a misleading no-op on this fresh stream)
        _offsetLib[i + 1] = {offvalue, length};
        offvalue += length;
    }
}
/// Accessor for the offset library: doc id -> {byte offset, byte length}
/// (filled by createOffsetLib).  Returns a mutable reference.
unordered_map<int, pair<int, int>>& PageLibPreprocess::getOffLib()
{
    return _offsetLib;
}

/// Write the offset library to `filepath`, one "docid offset length" line
/// per document, in ascending id order (ids are 1..size, as assigned by
/// createOffsetLib).  Exits on failure to open the output file.
void PageLibPreprocess::storeOffsetLib(const string& filepath)
{
    ofstream outfile2(filepath);
    if(!outfile2.is_open())
    {
        cerr << "Failed to open output file: " << filepath << endl;
        exit(1);
    }

    for (size_t i = 0; i < _offsetLib.size();++i)
    {
        // at() instead of operator[]: a missing id throws instead of
        // silently inserting a bogus {0,0} entry (which would also grow
        // the map while this loop reads size()).
        const auto &entry = _offsetLib.at(i + 1);
        outfile2 << i + 1 << " " << entry.first << " " << entry.second << '\n';
    }
    outfile2.close();
}
/*-----------------生成网页偏移库-------------------*/



/*-----------------生成倒排索引库-------------------*/

void PageLibPreprocess::createInvertIndexLib()
{
    //N是总页数
    int N = _pageList.size();
    for (int i = 0; i < N;++i)
    {
        //循环处理网页
        string content = _pageList[i]._docContent;
        //分词
        vector<string> wordlist= _cutTool->cutWord(content);
        //存储单词与词频
        unordered_map<string, double> wordmap;
        for(const auto word:wordlist)
        {
            if(wordmap.count(word)>0)
            {
                wordmap[word]++;
            }
            else
            {
                wordmap[word] = 1;
            }
        }
        //将该页的单词和对应词频压入
        _wordWvalue.push_back(wordmap);
        
        // 在该页出现过的词都在_wordDF中计数
        for(const auto pairWD:wordmap)
        {
            if(_wordDF.count(pairWD.first)>0)
            {
                _wordDF[pairWD.first]++;
            }
            else
            {
                _wordDF[pairWD.first] = 1;
            }
        }
        
    }

    //计算TF-IDF
    for (int i = 0; i < _wordWvalue.size();++i)
    {   //循环计算每篇文章的W’值，下标i为文章id
        double sumW = 0;
        for(auto &pairMap:_wordWvalue[i])
        {
            string pairword = pairMap.first;
            double TF = pairMap.second;
            double DF = _wordDF[pairword];
            double W = TF * log2(N / (DF + 1));//N是总页数
            //将该元素的TF值改为TF*IDF值
            _wordWvalue[i][pairword] = W;
            sumW += W * W;
        }
        for (auto &pairMap : _wordWvalue[i])
        {
            string pairword = pairMap.first;
            double W = pairMap.second;
            double Ww = W / sqrt(sumW);
            //加入倒排索引库
            _invertIndexLib[pairword].push_back({i + 1, Ww});
        }
    }
}

/// Accessor for the inverted index: word -> list of {docId, weight}
/// (filled by createInvertIndexLib).  Returns a mutable reference.
unordered_map<string, vector<pair<int, double>>>& PageLibPreprocess::getInvertLib()
{
    return _invertIndexLib;
}

/// Persist the inverted index to `filepath`, one line per word:
/// "word docId weight docId weight ...".  Exits if the file cannot
/// be opened, matching the other store* methods.
void PageLibPreprocess::storeInvertLib(const string& filepath)
{
    ofstream outfile3(filepath);
    if (!outfile3.is_open())
    {
        cerr << "Failed to open output file: " << filepath << endl;
        exit(1);
    }

    for (const auto &entry : _invertIndexLib)
    {
        // The word, then its postings separated by single spaces.
        outfile3 << entry.first << " ";
        for (const auto &posting : entry.second)
        {
            outfile3 << posting.first << " " << posting.second << " ";
        }
        outfile3 << endl;
    }
    outfile3.close();
}
/*-----------------生成倒排索引库-------------------*/