#include "../include/WebPageQuery.hpp"
#include "../include/Configuration.hpp"
#include "../include/PureWebPage.hpp"

#include <cmath>
#include <fstream>

#include <nlohmann/json.hpp>

using std::ifstream;
using std::istringstream;
using std::pair;
using std::string;

using Json = nlohmann::json;

// Default-construct the query engine. The jieba tokenizer loads its
// dictionaries in its own constructor, so nothing else to do here.
WebPageQuery::WebPageQuery()
    : _jieba()
{
    // jieba needs to be initialized
}

bool compare(const pair<int, double> &lhs,const pair<int, double> &rhs){
    if(abs(lhs.second - rhs.second) < 0.0000000001){
        return lhs.first < rhs.first;
    }
    else{
        return lhs.second > rhs.second;
    }
}

/**
 * Run one search: tokenize the raw query with jieba, build its normalized
 * TF-IDF weight vector, find pages containing every query word, rank them
 * by cosine similarity against the query vector, and return the top hits
 * serialized as JSON (or the "no answer" string when nothing matches).
 */
string WebPageQuery::doQuery(string & str)
{
    vector<string> queryWords = _jieba.cut(str);
    // debug: show the segmentation result
    cout << "分词: " << endl;
    for(auto& word: queryWords){
        cout << word << " ";
    }
    cout << endl;

    vector<double> queryWordsWeightVector = getQueryWordsWeightVector(queryWords);

    // (docId, per-query-word weight vector) for every page that contains
    // all of the query words
    vector<pair<int, vector<double>>> resultVec;

    bool findWebPage = executeQuery(queryWords, resultVec);

    if(findWebPage){
        vector<pair<int, double> > sortDoc;
        // 1. squared length of the query weight vector X
        double X2 = 0.0;
        for(auto &qw : queryWordsWeightVector){
            X2 += qw * qw;
        }
        // 2. cosine of each candidate document vector Y against X
        for(auto &res : resultVec){
            int docId = res.first;
            double XY = 0.0;
            double Y2 = 0.0;
            auto itx = queryWordsWeightVector.begin();
            auto ity = res.second.begin();
            for(; itx != queryWordsWeightVector.end() && ity != res.second.end(); ++itx, ++ity){
                XY += (*itx) * (*ity);
                Y2 += (*ity) * (*ity);
            }
            // Guard the denominator: an all-zero weight vector would give
            // 0/0 = NaN, and NaN scores break the strict-weak-ordering the
            // sort below relies on.
            double denom = sqrt(X2) * sqrt(Y2);
            double cos = (denom > 0.0) ? XY / denom : 0.0;
            sortDoc.push_back(make_pair(docId, cos));
        }
        // 3. rank by cosine (ties broken by docId, see compare())
        sort(sortDoc.begin(), sortDoc.end(), compare);
        // 4. keep only the ordered document ids
        vector<int> sortedDocId;
        sortedDocId.reserve(sortDoc.size());
        for(auto &sdoc : sortDoc){
            sortedDocId.push_back(sdoc.first);
        }

        // debug: dump the top 20 hits
        cout << "search result: " << endl;
        for(size_t idx = 0; idx < 20 && idx < sortedDocId.size(); ++idx){
            cout << "id: " << sortedDocId[idx] << endl;
            cout << "cos: " << sortDoc[idx].second << endl;
            cout << "title: " << _pageLib[sortedDocId[idx]].getTitle() << endl;
            cout << "summary: " << _pageLib[sortedDocId[idx]].summary(queryWords) << endl;
        }

        return createJson(sortedDocId, queryWords);

    }else{
        return returnNoAnswer();
    }
}


void WebPageQuery::loadLibrary()
{

    //读入偏移库
    ifstream ifOffset("../lib/offset.dat");
    if(!ifOffset.good()){
        perror("WebPageQuery:LoadLibrary:ifstream ifsOffset fail");
        exit(-1);
    }

    // 把偏移库写入成员变量_offsetLib
    string line;
    int docId = 0, offset = 0, len = 0;
    while(getline(ifOffset, line)){
        istringstream iss(line);
        iss >> docId >> offset >> len;

        _offsetLib[docId] = make_pair(offset, len);
    }

    //test 
    /* cout << "offsetLib size = " << _offsetLib.size() << endl; */

    //根据偏移库读入网页库
    ifstream ifsPageLib("../lib/ripepage.dat");
    if(!ifsPageLib.good()){
        perror("WebPageQuery:LoadLibrary:ifstream ifsPageLib fail");
        exit(-1);
    }

    for(auto& tmp: _offsetLib){
        docId = tmp.first;
        len = tmp.second.second;
        char doc[len];

        ifsPageLib.seekg(tmp.second.first);
        ifsPageLib.read(doc, len);

        //test
        /* cout << doc << endl; */

        string docStr(doc);
        _pageLib[docId] = PureWebPage(docStr);
    }

    // 读入倒排索引表
    ifstream ifsInvertIndex("../lib/invertIndex.dat");
    if(!ifsInvertIndex.good()){
        perror("WebPageQuery:LoadLibrary:ifstream ifsInvertIndex fail");
    }

    string word;
    double tf_idf;
    while(getline(ifsInvertIndex, line)){
        istringstream iss(line);
        set<pair<int, double>> wordIndexSet;
        iss >> word;
        while(iss >> docId >> tf_idf){
            wordIndexSet.insert(make_pair(docId, tf_idf));
        }
        _invertIndexTable[word] = std::move(wordIndexSet);
    }

}


/**
 * Treat the (jieba-segmented) query as one document X and build its
 * L2-normalized TF-IDF weight vector — one entry per query word, in the
 * order the words appear (duplicates included, matching executeQuery).
 */
vector<double> WebPageQuery::getQueryWordsWeightVector(vector<string> & queryWords)
{
    int docNum = _offsetLib.size();

    // IDF = log2(N / (DF + 1)), where DF = number of documents containing
    // the word (the size of its posting set in the inverted index).
    unordered_map<string, double> wordsIDF;
    for(auto &word : queryWords){
        // find(), not operator[]: operator[] would permanently insert an
        // empty posting set for every unknown query word
        auto it = _invertIndexTable.find(word);
        size_t df = (it != _invertIndexTable.end()) ? it->second.size() : 0;
        // Cast to double before dividing: the original did integer
        // division AND stored the result in an int, truncating every IDF.
        double idf = log2(static_cast<double>(docNum) / (df + 1));
        wordsIDF[word] = idf;
    }

    // TF = occurrences of the word within the query itself.
    unordered_map<string, double> wordsTF;
    for(auto &word : queryWords){
        ++wordsTF[word];
    }

    // First pass: raw weight w = TF * IDF plus the squared sum.
    vector<double> wordsW;
    double w2Sum = 0.0;
    for(auto &word : queryWords){
        // double, not int: the old int truncation zeroed most weights
        double w = wordsIDF[word] * wordsTF[word];
        wordsW.push_back(w);
        w2Sum += w * w; // running total for normalization
    }

    // Second pass: L2 normalization. Skip when all weights are zero —
    // dividing by sqrt(0) would fill the vector with NaN/inf.
    if(w2Sum > 0.0){
        double norm = sqrt(w2Sum);
        for(auto &w : wordsW){
            w /= norm;
        }
    }

    return wordsW;
}


/**
 * Look up every query word in the inverted index and keep only the pages
 * that contain ALL of the words. For each surviving page, resultVec gets
 * (docId, weights) where weights holds one TF-IDF value per query word.
 * Returns true when at least one page qualifies.
 */
bool WebPageQuery::executeQuery(const vector<string> & queryWords, vector<std::pair<int, vector<double>>> &resultVec)
{
    // 1. Gather candidates: each posting (docId, w) of each query word
    //    appends w to that doc's weight list.
    unordered_map< int, vector<double> > preResult;
    for(auto &query : queryWords){
        // find(), not operator[]: operator[] would permanently insert an
        // empty posting set for every unknown query word, growing the
        // index table on each search
        auto it = _invertIndexTable.find(query);
        if(it == _invertIndexTable.end()){
            continue;
        }
        for(auto &docIdnw : it->second){
            preResult[docIdnw.first].push_back(docIdnw.second);
        }
    }

    // 2. A page containing all N query words has collected exactly N
    //    weights; drop everything with fewer.
    for(auto it = preResult.begin(); it != preResult.end(); ){
        if(it->second.size() < queryWords.size()){
            it = preResult.erase(it);
        }
        else{
            ++it;
        }
    }

    // 3. Hand the survivors back to the caller.
    for(auto &res : preResult){
        resultVec.push_back(res);
    }

    return !resultVec.empty();
}

/**
 * Serialize the ranked result pages as JSON: keys "1", "2", ... in rank
 * order, each mapping to {title, summary, url} pulled from the page lib.
 */
string WebPageQuery::createJson(vector<int> & docIdVec, const vector<string> & queryWords)
{
    Json root;
    int rank = 1;

    for(auto& docId: docIdVec){
        auto& page = _pageLib[docId];

        Json entry;
        entry["title"] = Json(page.getTitle());
        entry["summary"] = Json(page.summary(queryWords));
        entry["url"] = Json(page.getLink());

        root[std::to_string(rank)] = entry;
        ++rank;
    }

    return root.dump();
}
// The "no result" payload handed back to the caller: an empty string
// signals that no page matched all query words.
string WebPageQuery::returnNoAnswer()
{
    return "";
}
