#include "PageLibPreprocessor.h"
#include "pageLib.h"

#include <dirent.h>
#include <unistd.h>

#include <algorithm>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include <func.h>

using namespace std;

//生成webtest.txt
void PageLibPreprocessor::createWebtxt()
{
    PageLib rss;//建立一个对象
    //打开人民网语料目录流
    chdir("人民网语料");
    string pwd = getcwd(nullptr, 0);
    DIR* pdir = opendir(pwd.c_str());
    if(nullptr == pdir)
    {
        perror("opendir");
        return;
    }
    //打开每一个文件
    struct dirent *pdirent;
    while ((pdirent = readdir(pdir)) != NULL)
    {
        ifstream ifs(pdirent->d_name);
        if(!ifs.good()) {
		    perror("ifstream open error!");
            return;
	    }
        if(strcmp(pdirent->d_name,".") == 0||strcmp(pdirent->d_name,"..") == 0
            ||strcmp(pdirent->d_name,"rigepage.dat") == 0)
        {
            continue;
        }
        cout<<pdirent->d_name<<endl;//测试用
        rss.parsePage(pdirent->d_name);

	    ifs.close();
    }
    closedir(pdir);
    chdir("..");

    vector<RssItem>& _rss2 = rss.getpagelist();
    vector<string> page;
    for(int j=0; j<_rss2.size();++j)
    {
        page.push_back(_rss2[j].description);//网页库
    }
    for(auto &elem:page)
    {
        //去掉字符串中的回车
        elem.erase(std::remove(elem.begin(), elem.end(), '\n'), elem.end());
    }
    //------------输出到webtest.txt中看一看------------------------
    ofstream ofs("webtest.txt",ios::out|ios::app);
    if(!ofs.good())
    {
        cerr<<"filename open failed!"<<endl;
        return;
    }
    for(auto &elem:page)
    {
        ofs << elem <<endl;
    }
    ofs.close();
}

//生成allWordFreq.txt
void PageLibPreprocessor::createAllwordToDoc()
{
    //创造一个分词工具----------------------------------------------------------
    SplitTool spl;
    //获得所有文章的所有词------------------------------------------------------
    ifstream ifs("webtest.txt");
    if(!ifs.good()) 
    {
        perror("ifstream open error!");
        return;
    }
    string lines;
    int lineNum = 1;//第几篇文章
    while(getline(ifs, lines)) 
    {
        vector<string> lineWords;
        lineWords = spl.cut(lines);//全部剪到_allWord中
        for(auto &elem : lineWords)
        {
            _wordinHowManyDoc[elem].insert(lineNum);
        }
        lineNum++;
    }
    ifs.close();

    //------------输出到allWordFreq.txt中看一看，存出现在哪些文章---------------
    ofstream ofs("allWordFreq.txt",ios::out|ios::app);
    for(auto &elem:_wordinHowManyDoc)
    {
        ofs<<elem.first;
        for(auto &j:elem.second)
        {
            ofs<<" "<<j;
        }
        ofs<<endl;
    }
    ofs.close();
}


//把每篇文章的content都存入WebPage中
void PageLibPreprocessor::creatPageVec()
{
    ifstream ifs("webtest.txt");
    if(!ifs.good()) {
        perror("ifstream open error!");
        return;
    }
    string docContent;
    int i=1;//测试用
    while(getline(ifs,docContent))
    {
        cout<<i++<<endl;
        WebPage wp(docContent);
        wp.processDoc();
        _pageList.push_back(wp);
    }
    ifs.close();
}

//生成倒排索引库的unordered_map<string,set<pair<long,double>>>
void PageLibPreprocessor::buildInvertIndexMap()
{
    //容器大小变成4135即文章数
    _WordWeightSumInWebPage.resize(_pageList.size(),0);
    //循环处理_pageList中的WebPage对象，建立初始_invertIndex
    for(int i=0;i<_pageList.size();++i){
		//把每一篇文章的词频库map生成出来
        map<string,int> WordsMap=_pageList[i].getWordsMap();//webpage类的函数

        for(auto pair:WordsMap){
            string word=pair.first;//单词
            if(word==" ") continue;
            int TF=pair.second;  //TF:该词在该文章中出现次数
        
            //这里先把单词在一篇文章中出现的次数记录下来，留待后面使用
            _invertIndex[word].insert({i,double(TF)});//TF在这不是权重，而是TF
        }
    }

    //------------初始的不进行加权处理的权重--------------------------------------
    for(auto &pair:_invertIndex){
        auto &Map=pair.second;//map<int,double>
        for(auto &elem:Map){//遍历pair<int,double>
            int idx=elem.first;//指第几篇文章
            int TF=elem.second;     //该词在该文章中出现次数
            int DF=Map.size();      //该词在所有文章中出现的次数，即有多少文章包含该词汇，即map的元素数量
            int N=_pageList.size(); //表示网页库中网页(即文档)的总数
            double IDF;
            if(N==DF){
                IDF=0;
            }
            else{
                IDF=(double)log2((double)N/(DF+1));   //IDF
            }
            double w=(double)(TF*IDF);        //得到每个词语在每个WebPage中的初始权重
            elem.second = w;
            //这里是精髓，遍历所有单词的时候顺便 += 此时对应文章的关于这个单词的权重
            //东加一个权重，西加一个权重，最后所有文章的总权重之和就加完了
            _WordWeightSumInWebPage[idx] += w*w; //顺便累加每篇网页中所有词语的权重平方之和
        }
    }

    //-----------------------得到最终的权重--------------------------------------
    for(auto &pair:_invertIndex){
        auto &Map=pair.second;//map<int,double>
        for(auto &elem:Map){//遍历pair<int,double>
            int idx=elem.first;//指第几篇文章
            double sumWeight=_WordWeightSumInWebPage[idx];  //得到该网页所有词语的权重之和
            double second=(sumWeight==0?0:(double)(elem.second)/sqrt(sumWeight));
            elem.second=second;
        }
    }
    //------存储到文本中-----------------------
    storeOnDisk();
}

//存储到文本中
void PageLibPreprocessor::storeOnDisk()
{
    ofstream ofs("invertIndex.txt");
    if(!ofs){
        cout<<"open invertIndex.txt error"<<endl;
    }
    
    //unordered_map<string,set<pair<int,double>>> _invertIndex;
    for(auto pair:_invertIndex){
        ofs << pair.first << " ";
		auto &Map = pair.second;
		for (auto elem : Map) {
			ofs << elem.first << " " << elem.second<<" ";
		}
		ofs << endl;
    }
}