#pragma once

// C++ standard library
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <queue>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

// Third-party libraries
#include <cppjieba/Jieba.hpp>
#include <nlohmann/json.hpp>
#include <tinyxml2.h>
#include <utfcpp/utf8.h>

// Project headers
#include "MyTask.h"

using std::ifstream;
using std::map;
using std::set;
using std::istringstream;
using std::cout;
using std::cerr;
using std::endl;
using std::priority_queue;
using std::pair;
using std::tuple;
using std::set_intersection;
using namespace nlohmann::literals;
using namespace tinyxml2;

struct Document
{
    string title;
    string link;
    string content;
};

inline void readOffset(ifstream &ifs, map<int, pair<int, int>> &offsets)
{
    string line;
    while(getline(ifs, line))
    {
        istringstream iss{line};
        int id;
        int offs;
        int size;
        iss >> id;
        iss >> offs;
        iss >> size;
        offsets[id] = pair<int, int>(offs, size);
    }
}

inline void readIndex(ifstream &ifs, map<string, map<int, double>> &idx)
{
    string line;
    while(getline(ifs, line))
    {
        istringstream iss{line};
        string word;
        iss >> word;
        int id;
        double weigth;
        while(iss >> id >> weigth)
        {
            idx[word].emplace(id, weigth);
        }
    }
}

inline void readStop(ifstream &ifs, set<string> &stop)
{
    string line;
    while(getline(ifs, line))
    {
        istringstream iss{line};
        string word;
        while(iss >> word)
        {
            stop.insert(word);
        }
    }
    ifs.close(); 
}

inline string parseDoc(const string &tag, const string &text)
{
    string open = "<" + tag + ">";
    string close = "</" + tag + ">";
    auto begin = text.find(open);
    auto end = text.find(close);
    if(begin == string::npos || end == string::npos)
    {   
        return "";   
    }

    begin += open.size();
    string content = text.substr(begin, end - begin);
    string abstract; 
    if(tag == "content")
    {
        const char *it = content.c_str();
        const char *ends = content.c_str() + content.size();

        for(size_t i = 0; i < 50 && it < ends; ++i)
        {
            auto start = it;
            utf8::next(it, ends);
            abstract.append(start, it);
        }
        return abstract;
    }
    else
    {
        return content;
    }
}

class WebPageSearchTask
: public MyTask
{
public:
    WebPageSearchTask(const string &msg, const TcpConnectionPtr &con)
    : m_msg(msg), m_con(con)
    {
        ifstream ifsoffsetlib{"../../libfile/offsetslib.dat"};
        ifstream ifsinvertidxlib{"../../libfile/invertedIndexLib.dat"};
        ifstream ifsstop{"../../corpus/stopwords/cn_stopwords.txt"};

        readOffset(ifsoffsetlib, m_offsets);
        readIndex(ifsinvertidxlib, m_invertedIndex);
    }
   
    

    virtual void process(MyThread* myTh) override
    {
#ifdef DEBUG
        cout << "开始执行网页搜索任务" << endl;
#endif

        auto data = myTh->cache.get(m_msg);
        if(!data.empty())
        {
            cout << "从缓存中得到数据" << endl;
            m_con->sendInLoop(data.dump(4));
            return;
        }

        vector<string> words;
        map<string, int> wordFreq;
        map<string, double> wordWeigth;
        m_tokenizer.Cut(m_msg, words);
        int nums = words.size(); // 总词数
        for(auto &word : words)
        {
            if(m_stopWordsSet.find(word) == m_stopWordsSet.end())
            {
                ++wordFreq[word];
#ifdef DEBUG
                cout << "[" << word << "]" << endl;
#endif
            }
        }
#ifdef DEBUG
        cout << "已完成输入解析" << endl;
#endif

        // 计算TF-IDF
        double sum_of_squares;
        for(auto &[word, freq] : wordFreq)
        {
            double TF = (double)freq / (double)nums;
            int N = m_offsets.size();
            if(m_invertedIndex.find(word) == m_invertedIndex.end())
            {
                continue;
            }
            int DF = m_invertedIndex.at(word).size();
            double IDF = log2(N + 1.0/DF + 1.0);
            double weigth = TF * IDF;
            sum_of_squares += (weigth * weigth);
            wordWeigth[word] = weigth;
        }
#ifdef DEBUG
        cout << "已完成TF-IDF计算" << endl;
#endif

        vector<int> id; // 关键字所在文档
        vector<int> intersection; // 所有关键字所在文档交集
        vector<int> temp;
        vector<double> weigthX; // 用户输入的文档的关键字权重向量X
        vector<string> wds; // 根据权重顺序所得到的关键字的序列 
        bool set_vec = true; // 判断是否为第一次
        for(auto &[word, weigth] : wordWeigth)
        {
            weigth = weigth / sqrt(sum_of_squares);
            weigthX.push_back(weigth);
            wds.push_back(word);
            if(set_vec){
                for(auto &wordidx : m_invertedIndex.at(word))
                {
                    intersection.push_back(wordidx.first);
                }
                set_vec = false;
            }else
            {
                for(auto &wordidx : m_invertedIndex.at(word))
                {
                    id.push_back(wordidx.first);
                }
                set_intersection(
                    id.begin(), 
                    id.end(), 
                    intersection.begin(), 
                    intersection.end(),
                    std::back_inserter(temp)
                );
                std::swap(intersection, temp);
                id.clear();
                temp.clear();
            }
        }        

        if(intersection.empty())
        {
            cout << "未找到网页" << endl;
            nlohmann::json js = R"(
            [
                {
                    "id":0,
                    "title":"未找到网页",
                    "link":"未找到网页",
                    "abstact":"未找到网页"
                }
            ]
            )"_json;
            m_con->sendInLoop(js.dump(4));
            return;
        }

        // 根据候选文档id得到的对应权重，并计算余弦相似度
        vector<pair<int, double>> docuVec;
        for(auto &docId : intersection)
        {
            vector<double> weigthY; //文档i的权重向量Y
            for(auto &word : wds)
            {
                double w = 0.0;
                auto it = m_invertedIndex[word].find(docId);
                if(it != m_invertedIndex[word].end())
                {
                    w = it->second;
                }
                weigthY.push_back(w);
            }

            double cosn;
            for(size_t i = 0; i < weigthX.size(); ++i)
            {
                cosn += weigthX[i] * weigthY[i];
            }
            docuVec.push_back({docId, cosn});
        }
#ifdef DEBUG
        cout << "已完成余弦相似度计算" << endl;
#endif

        std::sort(docuVec.begin(), docuVec.end(), [](pair<int, double> &p1, pair<int, double> &p2)
        {
            return p1.second > p2.second;
        });

        ifstream ifs{"../../libfile/pageslib.dat"};

        nlohmann::json js = nlohmann::json::array();
        for(auto &doc : docuVec)
        {
            int off = m_offsets[doc.first].first;
            int size = m_offsets[doc.first].second;
            string content(size, '\0');
            ifs.seekg(off);
            ifs.read(content.data(), size);
            string title = parseDoc("title", content);
            string link = parseDoc("link", content);
            string abstruct = parseDoc("content", content);
            js.push_back(
                {
                    {"id", doc.first},
                    {"title", title},
                    {"link", link},
                    {"abstruct", abstruct}
                }
            );
        }

#ifdef DEBUG
        cout << "生成 json" << endl;
#endif

        myTh->cache.put(m_msg, js);
        myTh->patch.put(m_msg, js);
        m_con->sendInLoop(js.dump(4));
    }

    virtual ~WebPageSearchTask() override
    {}

private:
    string m_msg;
    TcpConnectionPtr m_con;

    // map<int, Document>m_document;
    cppjieba::Jieba m_tokenizer;
    set<string> m_stopWordsSet;
    map<int, pair<int, int>> m_offsets;
    map<string, map<int, double>>m_invertedIndex;
};