#include "WebPageQuery.h"

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <iterator>
#include <sstream>
#include <vector>

#include "nlohmann/json.hpp"

using std::cerr;
using std::cout;
using std::ifstream;
using std::ostream_iterator;
using std::ostringstream;
using namespace nlohmann;

// Return the byte length of the UTF-8 sequence introduced by `byte`,
// derived from the number of leading 1-bits (a plain ASCII byte, with a
// clear top bit, counts as length 1).
int get_utf8(char byte)
{
    const auto bits = static_cast<unsigned char>(byte);
    int bytenum = 0;
    while (bytenum < 6 && (bits & (0x80u >> bytenum)))
    {
        ++bytenum;
    }
    return bytenum == 0 ? 1 : bytenum;
}

// Construct with the given tokenizer, preload the index/offset/stop-word
// libraries, and open the ripe page-content file that push_art() reads from.
WebPageQuery::WebPageQuery(SplitTool *lhs)
    : _jieba(lhs)
{
    init();
    _ifs.open("/home/sakura/all_file/search_engine/data/newripe.data");
    if (!_ifs)
    {
        // Previously this failure was silent and every later read returned
        // garbage; report it up front instead.
        cerr << " open newripe.data failed --WebPageQuery.cc\n";
    }
}

// 初始化
void WebPageQuery::init()
{
    // 加载偏移库
    ifstream offifs("/home/sakura/all_file/search_engine/data/newoffset.dat");
    if (!offifs)
    {
        cerr << " open offset.dat failed --WebPageQuery.cc -26\n";
        return;
    }
    ostringstream oss;
    oss << offifs.rdbuf();
    string transfer = oss.str();
    nlohmann::json json = nlohmann::json::parse(transfer.c_str());

    for (auto &item : json)
    {
        _offsetlib[item[0]] = {item[1][0], item[1][1]};
    }
    offifs.close();
    cout << " 偏移打开\n";

    // 加载倒排索引库
    ifstream indexIfs("/home/sakura/all_file/search_engine/data/invertIndex.dat");
    if (!indexIfs)
    {
        cerr << " open invertIndex.dat failed --WebPagQuery.cc -43 \n";
    }
    string line;
#if 0
    while(std::getline(indexIfs,line))
    {
        std::istringstream   iss(line);
        string str;
        iss>>str;
        string docid ;
        iss>>docid; 
        string dou;
        iss>>dou;
        int id = std::stoi(docid);
        double doub = std::stod(dou);
        _invertindex[str].emplace_back(id,doub);
    }
#endif
    // ostringstream indexOss;
    // indexOss << indexIfs.rdbuf();
    // string trans = indexOss.str();
    // if (trans.empty())
    //{
    //    std::cerr << " null \n";
    //}
    // cout<<trans;

    // #if 0
    nlohmann::json jsonIndex;
    indexIfs >> jsonIndex;
    for (auto &[key, value] : jsonIndex.items())
    {
        // _invertindex.insert({key,value});
        // cout<<key<<"  "<<value<<" \n";
        if (value.is_array())
        {
            for (auto &i : value)
            {
                // cout<<i[0]<<"              test for"<<i[1]<<"\n";
                _invertindex[key].emplace_back(i[0], i[1]);
            }
        }
        // for(auto& i: value)
        // {
        // cout<<i[0]<<"              test for"<<i[0][1]<<"\n";
        // cout<<i[0]<<"              test for"<<"\n";
        // _invertindex[key].push_back({i[0],i[0][1]});
        // kkkkkkkk}
    }
    //  #endif

    indexIfs.close();
    cout << " 倒排索引打开\n";

    // 加载中文停用词库
    ifstream stopIfs("/home/sakura/all_file/search_engine/static/stop/stop_words_zh.txt");
    string words;
    while (std::getline(stopIfs, words))
    {
        words.pop_back();
        _stoplib.emplace(words);
    }
    stopIfs.close();
    cout << " init member container success\n";
}

// Read one article's raw content out of the ripe data file (using the
// byte range recorded in _offsetlib) and append it to _pages.
// @param lhs (docid, score) result pair; only the docid is used here.
void WebPageQuery::push_art(getmyresult &lhs)
{
    int docid = lhs.first;
    int begin = _offsetlib[docid].first;
    int end = _offsetlib[docid].second;
    int length = end - begin + 1;
    _ifs.seekg(begin, std::ios::beg);
    // BUG FIX: the old `new char[length + 1]` buffer was never deleted,
    // leaking on every page loaded. A vector owns the storage and keeps the
    // trailing NUL terminator the old code relied on.
    std::vector<char> buf(static_cast<size_t>(length) + 1, '\0');
    _ifs.read(buf.data(), length);
    _pages.emplace_back(WebPage(buf.data()));
}

// Load the content of the highest-scoring pages — at most 10 — from the
// result queue into _pages. The queue is consumed in the process.
void WebPageQuery::getpage(priority_queue<getmyresult> &lhs)
{
    for (int taken = 0; taken < 10 && !lhs.empty(); ++taken)
    {
        getmyresult best = lhs.top();
        lhs.pop();
        push_art(best);
    }
}

// Run a query: tokenize the key, drop stop words, look up the inverted
// index, keep only documents containing every term, rank them by cosine
// similarity, and return the best pages serialized as JSON.
// @param key raw query string (Chinese/English mixed).
// @return JSON array serialized from the loaded _pages.
nlohmann::json WebPageQuery::doQuery(const string &key)
{
    cout << key << "\n";
    // Step 1: tokenize, then count term frequencies while skipping stop words.
    vector<string> wordset = _jieba->cut(key);
    map<string, int> freq;
    for (const auto &word : wordset)
    {
        if (_stoplib.find(word) == _stoplib.end())
        {
            ++freq[word];
        }
    }

    // Step 2: for each query term, gather the (index weight, query frequency)
    // pairs per document, and remember which terms each document contains.
    map<int, vector<pair<double, int>>> caculate;
    map<int, set<string>> calu;
    for (const auto &term : freq) // renamed: previously shadowed parameter `key`
    {
        cout << term.first << "\n";
        // BUG FIX: operator[] inserted an empty posting list for every
        // unknown term, growing _invertindex on each query; use find().
        auto posting = _invertindex.find(term.first);
        if (posting == _invertindex.end())
        {
            continue;
        }
        for (const auto &entry : posting->second)
        {
            int docid = entry.first;
            double weight = entry.second;
            caculate[docid].emplace_back(weight, term.second);
            calu[docid].emplace(term.first);
        }
    }

    // AND semantics: discard documents that miss any query term.
    const size_t termCount = freq.size(); // size_t: avoids signed/unsigned compare
    for (const auto &entry : calu)
    {
        if (entry.second.size() != termCount)
        {
            caculate.erase(entry.first);
        }
    }

    // Step 3: score each surviving document with the cosine of the angle
    // between its weight vector and the query-frequency vector.
    priority_queue<getmyresult> result;
    for (const auto &docEntry : caculate)
    {
        double dot = 0;
        double docNorm = 0;
        double queryNorm = 0;
        for (const auto &wf : docEntry.second)
        {
            docNorm += wf.first * wf.first;
            queryNorm += wf.second * wf.second;
            dot += wf.first * wf.second;
        }
        double denom = std::sqrt(docNorm) * std::sqrt(queryNorm);
        if (denom == 0)
        {
            // Guard: a zero vector would yield NaN and poison the ranking.
            continue;
        }
        getmyresult hs;
        hs.first = docEntry.first;
        hs.second = dot / denom;
        result.push(hs);
    }

    // Load the top pages and serialize them to JSON.
    getpage(result);
    nlohmann::json json = _pages;
    return json;
}
