#include "../include/PageLibPreprocessor.h"
#include "../include/PageLib.h"
#include "../include/WebPage.h"
#include "../include/Configuration.h"

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <sstream>

using std::ofstream;

// Builds the in-memory page library: wraps every raw document returned by
// the PageLib in a WebPage object.
//
// @param pconf       configuration (stop words, paths); stored for later use
// @param psplitTool  word-segmentation tool; stored for later use
// @param pb          source page library providing the raw documents
PageLibPreprocessor::PageLibPreprocessor(Configuration *pconf, SplitTool *psplitTool, PageLib &pb) 
: _psplitPool(psplitTool)
, _pconf(pconf)
{
    // Construct the WebPage in place instead of copying the document string
    // and a temporary WebPage for every entry.
    const vector<string> &docs = pb.getFiles();
    for (const string &doc : docs) {
        _pageLib.emplace_back(doc, pconf);
    }
}

// All members clean up via their own destructors; nothing to release here.
PageLibPreprocessor::~PageLibPreprocessor() = default;

// Runs the full preprocessing pipeline in order:
// 1. de-duplicate the page library,
// 2. build the TF-IDF inverted index,
// 3. persist pages, offsets, and the index to disk.
void PageLibPreprocessor::doProcess() {
    cutRedundantPage();
    buildInvertIndexTable();
    storeOnDisk();
}

// Intended to reload previously persisted preprocessing results from disk.
// Currently an empty stub — nothing is read. TODO(review): implement or remove.
void PageLibPreprocessor::readInfoFromFile() {

}

// Removes duplicate pages from _pageLib.
//
// The original loop called _pageLib.erase(it) while iterating with `it`,
// which invalidates the iterator (undefined behavior) and also skipped
// elements when three or more equal pages were adjacent. It additionally
// evaluated end() - 1 on a possibly empty vector. This version sorts so
// that duplicates (per WebPage::operator==) become adjacent, then removes
// them with the standard erase-unique idiom.
void PageLibPreprocessor::cutRedundantPage() {
    if (_pageLib.empty()) {
        return;  // end() - 1 / unique would be invalid on an empty range
    }

    sort(_pageLib.begin(), _pageLib.end(), &WebPage::Compare);

    // std::unique compacts the range using WebPage::operator==, matching
    // the original `*it == *jt` comparison.
    auto newEnd = std::unique(_pageLib.begin(), _pageLib.end());
    _pageLib.erase(newEnd, _pageLib.end());
}

void PageLibPreprocessor::buildInvertIndexTable() {
    //容器大小变成4135即文章数
    vector<double> _WordWeightSumInWebPage;
    _WordWeightSumInWebPage.resize(_pageLib.size(),0);
    //循环处理_pageLib中的WebPage对象，建立初始_invertIndexTable
    for(int i=0;i<_pageLib.size();++i){
		//把每一篇文章的词频库map生成出来
        map<string,int> WordsMap=_pageLib[i].getWordsMap();//webpage类的函数

        for(auto pair:WordsMap){
            string word=pair.first;//单词
            if(word==" ") continue;
            int TF=pair.second;  //TF:该词在该文章中出现次数
        
            //这里先把单词在一篇文章中出现的次数记录下来，留待后面使用
            _invertIndexTable[word].push_back({i,double(TF)});//TF在这不是权重，而是TF
        }
    }

    cout << "jjjj" << endl;
    //------------初始的不进行加权处理的权重--------------------------------------
    for(auto &pair:_invertIndexTable){
        auto &Map=pair.second;//map<int,double>
        for(auto &elem:Map){//遍历pair<int,double>
            int idx=elem.first;//指第几篇文章
            int TF=elem.second;     //该词在该文章中出现次数
            int DF=Map.size();      //该词在所有文章中出现的次数，即有多少文章包含该词汇，即map的元素数量
            int N=_pageLib.size(); //表示网页库中网页(即文档)的总数
            double IDF;
            if(N==DF){
                IDF=0;
            }
            else{
                IDF=(double)log2((double)N/(DF+1));   //IDF
            }
            double w=(double)(TF*IDF);        //得到每个词语在每个WebPage中的初始权重
            elem.second = w;
            //这里是精髓，遍历所有单词的时候顺便 += 此时对应文章的关于这个单词的权重
            //东加一个权重，西加一个权重，最后所有文章的总权重之和就加完了
            _WordWeightSumInWebPage[idx] += w*w; //顺便累加每篇网页中所有词语的权重平方之和
        }
    }

    cout << "kkk" << endl;
    //-----------------------得到最终的权重--------------------------------------
    for(auto &pair:_invertIndexTable){
        auto &Map=pair.second;//map<int,double>
        for(auto &elem:Map){//遍历pair<int,double>
            int idx=elem.first;//指第几篇文章
            double sumWeight=_WordWeightSumInWebPage[idx];  //得到该网页所有词语的权重之和
            double second=(sumWeight==0?0:(double)(elem.second)/sqrt(sumWeight));
            elem.second=second;
        }
    }
}

void PageLibPreprocessor::storeOnDisk() {
    ofstream ofs1("../data/newripepage.dat", std::ios::out|std::ios::app);
    if (!ofs1.good()) {
        cout <<"ofstrem open file error" << endl;
        return;
    }

    int count = 1;
    int beginOffset = 0;
    for (auto it = _pageLib.begin(); it != _pageLib.end(); ++it) {
        string newWeb = (*it).getDoc();
        size_t idLocate1 = newWeb.find("<docid>");
        size_t idLocate2 = newWeb.find("</docid>");
        newWeb.erase(idLocate1 + 7, idLocate2 - idLocate1 - 7);
        ostringstream oss;
        oss << count;
        newWeb.insert(idLocate1 + 7, oss.str());
        ofs1 << newWeb;
        _offsetLib[count++] = make_pair(beginOffset, newWeb.size());
        beginOffset += newWeb.size();
    }    
    ofs1.close();

    ofstream ofs2("../data/newoffset.dat", std::ios::out|std::ios::app);
    if (!ofs2.good()) {
        cout <<"ofstrem open file error" << endl;
        return;
    }
    for (auto &elem: _offsetLib) {
        ostringstream oss;
        oss << elem.first << " " << elem.second.first << " " << elem.second.second << endl;
        ofs2 << oss.str();
    }
    ofs2.close();

    ofstream ofs3("../data/invertIndex.dat", std::ios::out|std::ios::app);
    if (!ofs3.good()) {
        cout <<"ofstrem open file error" << endl;
        return;
    }
    set<string> mySet = _pconf->getStopWordList();
    for (auto &elem: _invertIndexTable) {
        if (mySet.count(elem.first)) {
            continue;
        }
        ostringstream oss;
        oss << elem.first << " ";
        for (auto &secondElem: elem.second) {

            oss << secondElem.first << " " << secondElem.second << " ";
        }
        oss << endl;
        ofs3 << oss.str();
    }
    ofs3.close();
}

