#include "WebPageQuery.h"
#include "nlohmann/json.hpp"
#include "Mylogger.h"
#include "tinyxml2.h"
#include <fstream>
#include <sstream>
#include <queue>

using std::cout;
using std::ifstream;
using std::istringstream;
using std::priority_queue;
using namespace::tinyxml2;
using Json = nlohmann::json;

// Strict weak ordering of Similarity by cosine value, so that
// std::priority_queue surfaces the most similar document first.
bool operator<(const Similarity& lhs,const Similarity& rhs){
    // Direct comparison replaces the verbose if/else over a boolean.
    return lhs.cosValue < rhs.cosValue;
}

void WebPageQuery::buildStopSet(){
    for(string filename:_conf->_stopFiles){
        //打开文件
        ifstream ifs(filename);
        if(!ifs.good()){
            std::cout << "open " << filename << " fail "<<std::endl;
            return ;
        }
        //读出一个单词
        string word;
        while(ifs >> word){
            //加入set
            _stopWords.insert(word);
        }
        //关闭文件
        ifs.close();
    }

}

// Load the two on-disk index structures produced by the indexer:
//  - _offsetLib:   docid -> (byte offset, length) into the cleaned-page file
//  - _invertIndex: word  -> set of (docid, weight) postings
void WebPageQuery::readInfo(){
    // Offset library: one "docid pos length" triple per line.
    ifstream ifs(_offsetLibPathname);
    string line;
    while(getline(ifs,line)){
        int docid = 0, pos = 0, length = 0;
        istringstream iss(line);
        if(iss >> docid >> pos >> length){
            _offsetLib[docid] = std::make_pair(pos,length);
        }
    }
    ifs.close();

    // Inverted index: one "word docid weight [docid weight ...]" line per word.
    ifstream ifs2(_invertIndexName);
    string line2;
    while(getline(ifs2,line2)){
        istringstream iss(line2);
        string word;
        iss >> word;
        int docid;
        double weight;
        // Extract (docid, weight) pairs until the line is exhausted.
        // Testing the extraction itself — rather than iss.good() after each
        // read — keeps the FINAL pair of every line: a successful read that
        // reaches end-of-line sets eofbit, which made good() return false
        // and silently drop that last posting.
        while(iss >> docid >> weight){
            _invertIndex[word].insert(std::make_pair(docid,weight));
        }
    }
    ifs2.close();
}

//对要查询的数据切分成单词,并计算出基准向量,map中的string为单词,double为权值
void WebPageQuery::cutQuery(const string& query,vector<string>& words,map<string,double>& vec){
    vector<string> beginWords;
    map<string,int> wordTimes;   //单词及其出现的次数
    _cuttor->cut(query,beginWords);
    //统计次文章中的关键字
    for(const string& word:beginWords){
        //如果单词没有出现在停止中，就加入unordered_map
        int count = _stopWords.count(word);
        if(count == 0){
            wordTimes[word]++;
        }
    }
    //计算出每个单词的权值
    int N = _offsetLib.size();
    map<string,double> wordWeight;
    for(pair<string,int> elem : wordTimes){
        string word = elem.first;
        int times = elem.second;
        wordWeight[word] = log2(N/(1+1)+1)*times;
    }
    //对权值归一化处理
    double sumWeight2 = 0;
    for(pair<string,double> elem:wordWeight){
        sumWeight2 += elem.second*elem.second;
        //顺便找出关键词加入vector中
        words.push_back(elem.first);
    }
    double sumWeight = sqrt(sumWeight2);
    for(auto it =wordWeight.begin(); it != wordWeight.end(); ++it){
        it->second = it->second/sumWeight;    
    }
    //将向量的结果返回
    vec.swap(wordWeight);
}

//获得包含所有关键字的交集
void WebPageQuery::getIntersection(const vector<string>& words,set<int>& docids){
   //获得每个单词对应的集合
   unordered_map<string,set<int>> wordDocSet;
   for(string word:words){
       //查看倒排集合中是否有该单词
       auto it = _invertIndex.find(word);
       if(it == _invertIndex.end()){
           //倒排索引中没有该单词,此次检索结果为空
           /* cout << "1\n"; */
           docids.clear();
           return ;
       }else{
           //倒排索引中有该单词,将结果加入集合中
           for(auto elem : it->second){
                int docid = elem.first;
                wordDocSet[word].insert(docid);
           }
       }
       /* cout << "wordDocSet[word].size() = " << wordDocSet[word].size() << "\n"; */
   }
   //将所有集合取交集
   //获得第一个子集合
   set<int> firstSet;
   if(wordDocSet.empty()){
        //检索结果为空 
        docids.clear();
        cout << "2\n";
        return ;
   }
   firstSet = wordDocSet.begin()->second;
    //最终集合中的docid必须在每个子集合中都出现
    //也就是说如果一个docid没有出现在第一个集合中
    //,那么他也就不可能是最终的docid
   for(int docid : firstSet){
       auto nextElem = wordDocSet.begin();
       ++nextElem;
       int flag = true;  //表示该docid可以存入最终的交集
       for(;nextElem!=wordDocSet.end();++nextElem){
            int count = nextElem->second.count(docid);
            if(count == 0){
                //在其他集合中没有该docid
                flag = false; //该docid不应该出现在最终交集中  
                break;
            }
       } 
       if(flag == true){
           //将docid加入交集中
            docids.insert(docid); 
       }
   }
}

//计算出每个docid对应的向量
void WebPageQuery::getAllVec(const vector<string>& words,const set<int>& docids,map<int,map<string,double>>& allvec){
    //1.找到文章docid
    for(int docid : docids){
        //2.找到单词
        for(string word: words){
            //3.获取单词的权值
                double weight = 0;
                set<pair<int,double>> Set = _invertIndex[word];
                for(auto elem : Set){
                    if(elem.first == docid){
                        weight = elem.second;
                    }
                }
            //4.将结果插入allvec
            allvec[docid].insert(make_pair(word,weight));
        }
    }
}

//计算出docid及其与要查询数据的余弦相似度
void WebPageQuery::getSimilarity(const map<int,map<string,double>>allvec,const map<string,double>& vec,vector<Similarity>& similarity){
    //计算出基准向量的长度
    double vecLength2 = 0;
    for(auto elem : vec){
        vecLength2 += elem.second*elem.second;
    }
    double veclength = sqrt(vecLength2);

    //计算余弦相似度
    //取出一篇文章的向量
    for(auto elem: allvec){
        int docid = elem.first;
        map<string,double> docvec = elem.second; 
        double pointmulti = 0;
        double docvecLength2 = 0;
        //取出每个单词的偏移量
        for(auto wordOffset : docvec){
            string word = wordOffset.first;
            double weight = wordOffset.second;
            docvecLength2 += weight*weight;
            //找到基准向量,在该单词放心的偏移量
            auto it = vec.find(word);
            double offset = 0;
            if(it != vec.end()){
                offset = it->second;
            }
            pointmulti += weight*offset;
        }
        //计算余弦值
        double docvecLength = sqrt(docvecLength2);
        double cosValue = pointmulti/(veclength+docvecLength);
        //将结果写入map
        Similarity sim;
        sim.docid = docid;
        sim.cosValue = cosValue;
        similarity.push_back(sim);
    }
}

//统计出余弦相似度最高的十片文档的docid
void WebPageQuery::getTopSimilarity(const vector<Similarity>& similarity,vector<int>& docids){
    priority_queue<Similarity> prique;
    for(Similarity sim : similarity){
        prique.push(sim);
    }
    //从prique中选出前十个作为结果
    for(int idx = 0; idx < 10; ++idx){
        if(!prique.empty()){
            const Similarity& res = prique.top();
            docids.push_back(res.docid);
            prique.pop();
        }else{
            break;
        }
    }
}

// Return the byte length of the UTF-8 sequence whose lead byte is `ch`:
// ASCII (high bit clear) is one byte; otherwise one plus the run of
// 1-bits from bit 6 downward (e.g. 1110xxxx -> 3 bytes).
size_t WebPageQuery::nBytesCode(const char ch){
    if((ch & (1<<7)) == 0){
        return 1;   // single-byte (ASCII) character
    }
    size_t nBytes = 1;
    for(int bit = 6; bit >= 1; --bit){
        if((ch & (1<<bit)) == 0){
            break;
        }
        ++nBytes;
    }
    return nBytes;
}

//找出要返回的文章,并将其组织为Json格式
void WebPageQuery::getResult(const vector<int>& docids,string& result){
    Json Result;
    int docidx = 1;
    for(int docid : docids){
        //读取出paper
        string paper;
        getWebPage(docid,paper);
        //解析出webPage
        WebPage webPage;
        parsePaper(paper,webPage);
        int wdocid = webPage._docId;
        string title = webPage._docTitle;
        string link = webPage._docUrl;
        //切割出前100的汉字或者字符
        string content = webPage._docContent;
        int idx = 0;
        for(int characters = 0;idx <(int)content.size() && characters <= 50;++characters){
            size_t clength = nBytesCode(content[idx]);
            idx += clength;
        }
        string abstract = webPage._docContent.substr(0,idx);
        //将结果写入Json
        /* Result["docs"].push_back({{"docid",wdocid},{"title",title}, */
        /*                          {"link",link},{"abstract",abstract}}); */
        /* cout << "docid = " << wdocid << "\n"; */
        /* cout << "title = " << title << "\n"; */
        /* cout << "link = " << link << "\n"; */
        /* cout << "abstract = " << abstract << "\n"; */
        string key = string("doc")+std::to_string(docidx++);
        Result[key]["docid"] = wdocid;
        Result[key]["title"] = title;
        Result[key]["link"] = link;
        Result[key]["abstract"] = abstract;
    }
    result = Result.dump();
}

//从偏移库中读出一篇文章
void WebPageQuery::getWebPage(int docid,string & paper){
    //1.获得起始位置和偏移量
    auto it = _offsetLib.find(docid);
    int pos = it->second.first;
    int length = it->second.second;
    /* cout << "_offsetLib lenth = " << length << "\n"; */
    //1.打开文件
    ifstream ifs(_cleanedPageName);
    //2.偏移到起始位置
    ifs.seekg(pos);
    //3.读取指定字节的信息
    int count = 0;
    string gPaper;
    char buf[4096];
    while(count != length){
        memset(buf,0,sizeof(buf));
        if(length-count > (int)sizeof(buf)-1){
            ifs.read(buf,(int)sizeof(buf)-1);
            gPaper = gPaper + buf;
            /* cout << "strlen(buf) = " << strlen(buf) << endl; */
            count = gPaper.size();
        }else{
            ifs.read(buf,length-count);
            gPaper = gPaper + buf;
            /* cout << "strlen(buf) = " << strlen(buf) << endl; */
            count = gPaper.size();
        }
    }
    paper = gPaper;
    //4.关闭文件
    ifs.close();

}

//解析从偏移库中读出的文章
// Parse one <doc> XML record read from the page library into `webPage`.
// Malformed records are logged and leave `webPage` untouched.
void WebPageQuery::parsePaper(string paper,WebPage& webPage){
    XMLDocument doc;
    doc.Parse(paper.c_str(),paper.size());
    XMLElement* docNode = doc.FirstChildElement("doc");
    if(docNode == nullptr){
        LogWarn("Paper parsing error!");
        return ;
    }

    XMLElement * docidNode = docNode->FirstChildElement("docid");
    XMLElement * titleNode = docNode->FirstChildElement("title");
    XMLElement * linkNode = docNode->FirstChildElement("link");
    XMLElement * contentNode = docNode->FirstChildElement("content");
    if(docidNode == nullptr || titleNode==nullptr
       ||linkNode == nullptr || contentNode == nullptr){
        LogWarn("Paper parsing error!");
        return ;
    }

    // tinyxml2's GetText() returns nullptr for an element with no text
    // child; constructing std::string from nullptr is undefined behavior,
    // so check each pointer and substitute "" for optional fields.
    const char* docidText = docidNode->GetText();
    const char* titleText = titleNode->GetText();
    const char* linkText = linkNode->GetText();
    const char* contentText = contentNode->GetText();
    if(docidText == nullptr){
        // A record without a docid cannot be used at all.
        LogWarn("Paper parsing error!");
        return ;
    }

    webPage._docId = std::stoi(docidText);
    webPage._docUrl = linkText ? linkText : "";
    webPage._docTitle = titleText ? titleText : "";
    webPage._docContent = contentText ? contentText : "";
}

//将结果以json格式的字符串返回
// Run a full query end to end: tokenize, intersect, vectorize, rank by
// cosine similarity, and serialize the top documents as a JSON string.
void WebPageQuery::doQuery(const string& query,string& result){
    LogInfo("In preparation...");

    // 1. Key words and the normalized query vector.
    vector<string> words;
    map<string,double> vec;
    cutQuery(query,words,vec);

    // 2. Documents containing every key word.
    set<int> docids;
    getIntersection(words,docids);

    // 3. Per-document weight vectors over the query words.
    map<int,map<string,double>> allvec;
    getAllVec(words,docids,allvec);

    // 4. Cosine similarity of each candidate against the query.
    vector<Similarity> similarity;
    getSimilarity(allvec,vec,similarity);

    LogInfo("Getting results");

    // 5. Keep only the ten most similar documents.
    vector<int> resultDocids;
    getTopSimilarity(similarity,resultDocids);

    // 6. Serialize them for the client.
    getResult(resultDocids,result);
    LogInfo("Query successful");
}
