#include"../inc/WebPageLib.h"
using std::ofstream;
using std::endl;

namespace SE
{
    // Builds the web-page library: stores the three data-file paths and
    // immediately loads the stop-word, page and offset libraries from disk.
    // NOTE: readInfo() terminates the process (exit(1)) if any file fails to open.
    WebPageLib::WebPageLib(const string& stopWords_dir, const string& page_dir, const string& offset_dir)
    : _stopWords_dir(stopWords_dir), _page_dir(page_dir), _offset_dir(offset_dir)
    {
        readInfo();
    }

    void WebPageLib::readInfo()
    {
        // Read StopWords lib
        ifstream stopwords(_stopWords_dir);
        if(!stopwords.good()) 
        {
            logError("Open File");
            exit(1);
        }
        string words;
        while(getline(stopwords, words))
            _stopWords.insert(words);
        
        // Read Offset lib
        ifstream pages(_page_dir);
        ifstream offsets(_offset_dir);
        if(!pages.good() || !offsets.good());
        {
            logError("Open File");
            exit(1);
        }

        string s;
        int id;
        off_t begin, end;
        while(getline(offsets, s))
        {
            stringstream ss(s);
            ss >> id >> begin >> end;
        }
        char buf[BUFSIZ] = {0};
        pages.read(buf, end - begin);
        string doc(buf);
        WebPage webpage(doc, _splitTool, _stopWords);

        if(std::find(_webPages.begin(), _webPages.end(), webpage) == _webPages.end())
            _webPages.push_back(webpage);

        stopwords.close();
        offsets.close();
        pages.close();
    }
    
    // Persists the in-memory library to disk: the de-duplicated pages, their
    // byte-offset records, and the inverted index (built here via
    // createDF_hashmap()/createInvertIndexTable()).
    // @param newPage     output path of the page library
    // @param newOffset   output path of the offset library ("id begin end" per line)
    // @param invertIndex output path of the inverted index library
    void WebPageLib::storeOnDisk(const string& newPage, const string& newOffset, const string& invertIndex)
    {
        ofstream pages(newPage, ofstream::out);
        ofstream offsets(newOffset, ofstream::out);
        ofstream index(invertIndex, ofstream::out);

        if(!pages.good() || !offsets.good() || !index.good())
        {
            logError("Open File");
            exit(1);
        }

        for(size_t i = 0; i < _webPages.size(); i++)
        {
            string doc = _webPages[i].getDoc();
            off_t begin = pages.tellp();
            pages << doc;
            off_t end = pages.tellp();
            // FIX: the begin offset was omitted, but readInfo() parses
            // "id begin end" — without it the offset library it writes
            // cannot be read back by this class.
            offsets << i << " " << begin << " " << end << endl;
        }

        createDF_hashmap();
        createInvertIndexTable();

        // one line per word: word (docId weight)*
        for(auto & k : _invertIndexTable)
        {
            index << k.first << " ";
            for(auto & kk : k.second)
                index << kk.first << " " << kk.second << " ";
            index << endl;
        }

        pages.close();
        offsets.close();
        index.close();
        logInfo("新的网页库，偏移库，倒排索引库创建成功");
    }

    // Builds the DF (document frequency) table: for every word, the number
    // of pages whose word map contains it.
    void WebPageLib::createDF_hashmap()
    {
        for(auto & page : _webPages)
        {
            for(const auto & entry : page.getWordsMap())
                ++_DF_hashmap[entry.first];
        }
    }

    /*
    权重计算方法：TF-IDF算法
    TF : 单词在该篇文章中的频率 
    DF : 包含该词语的文章总数
    IDF : lg (N / DF + 1) 逆文本频率指数，N代表文章总数
    该系数越大，该单词在该文章中出现频率越高，在所有文章中出现频率越低
    weight = TF * IDF   
    */
    void WebPageLib::createInvertIndexTable()
    {
        vector<vector<pair<string, double>>> weights(_webPages.size());

        for(size_t i = 0; i < _webPages.size(); i++)
        {
            map<string, int>& wordsmap = _webPages[i].getWordsMap();
            for(auto & k : wordsmap)
            {
                string word = k.first;
                double TF = static_cast<double>(k.second/ _webPages[i].getTotal());
                int DF = _DF_hashmap[word];
                double IDF = log10(static_cast<double>(_webPages.size()/ (DF+1)));
                double w = TF * IDF;
                weights[i].push_back(std::make_pair(word, w));
            }
        }

        for(size_t i = 0; i < weights.size(); i++)
        {
            vector<pair<string, double>>& words = weights[i];
            double sum = 0;
            for(auto & k : words)
                sum += k.second * k.second;
            sum = sqrt(sum);
            for(auto & k : words)
                k.second /= sum;
        }

        for(size_t i = 0; i < weights.size(); i++)
        {
            vector<pair<string, double>>& words = weights[i];
            for(auto & k : words)
                _DF_hashmap[k.first]++;
        }
    }
}