#include "WebPageQuery.h"

#include <algorithm>
#include <cmath>
#include <limits>
#include <sstream>

using std::istringstream;
vector<std::pair<int, double>> cosinesimilarCompute(vector<std::pair<int, vector<double>>> &web_vec, vector<double> base);

// Constructs a query handler for `queryMsg`, borrowing `cuttor` (not owned)
// as the word-segmentation tool used for both the query and result summaries.
WebPageQuery::WebPageQuery(const string &queryMsg, SplitTool *cuttor)
    : _queryWord(queryMsg)
    , _jieba(cuttor) {}

// Runs the full query pipeline: load libraries, segment the query, build the
// query TF-IDF vector, intersect the inverted index, rank by cosine
// similarity, and return a complete HTTP/JSON response (or an empty answer).
string WebPageQuery::doQuery() {
    // Load the offset library and the inverted index from disk.
    loadLibrary();

    // Segment the query string into words.
    vector<string> words = _jieba->cut(_queryWord);

    // Debug trace: show how the query was tokenized.
    for (const auto &w : words) {
        std::cout << w << " ";
    }
    std::cout << "\n";

    // Base vector: normalized TF-IDF weights of the query words.
    vector<double> words_weight = getQueryWordsWeightVector(words);

    // Run the query; on success web_vec holds (docid, feature-vector) pairs.
    vector<std::pair<int, vector<double>>> web_vec;
    if (!executeQuery(words, web_vec)) {
        std::cout << "失败：网页信息获取完成\n";
        return returnNoAnswer();
    }

    // Rank the candidate pages by cosine similarity against the base vector.
    vector<std::pair<int, double>> web_id = cosinesimilarCompute(web_vec, words_weight);

    // Materialize a WebPage object for every ranked docid.
    for (const auto &w : web_id) {
        const int web_page_id = w.first;

        // FIX: single find() instead of two operator[] calls — operator[]
        // would default-insert a bogus (0,0) offset for an unknown id and
        // then build a page from offset 0.
        auto it = _offsetLib.find(web_page_id);
        if (it == _offsetLib.end()) {
            std::cout << "偏移库中缺少网页: " << web_page_id << "\n";
            continue;
        }
        const size_t pos = it->second.first;
        const size_t len = it->second.second;
        std::cout << "匹配网页库信息： " << web_page_id << " " << pos << " " << len << "\n";

        // Move-assign the temporary instead of copying a named local.
        _pageLib[web_page_id] = WebPage(web_page_id, pos, len, _jieba);
    }
    std::cout << "成功：网页信息获取完成\n";
    return createJson(web_id);
}

void WebPageQuery::loadLibrary() {
    // 加载倒排索引库、偏移库
    string offset = "/home/kyle/Project/WebDisk/data/newoffset2.dat";
    string index_table = "/home/kyle/Project/WebDisk/data/InvertIndexTable.dat";

    ifstream ifs_off(offset);
    ifstream ifs_pages(index_table);

    // 读取内容
    string off_line;
    while (getline(ifs_off, off_line)) {
        istringstream ss(off_line);
        int docid;
        size_t pos, len;
        ss >> docid >> pos >> len;
        _offsetLib[docid] = std::make_pair(pos, len);
    }

    string line;
    while (getline(ifs_pages, line)) {
        istringstream ss(line);
        string word;
        ss >> word;

        int docid;
        double weight;
        while (ss >> docid >> weight) {
            _invertIndexTable[word].insert(std::make_pair(docid, weight));
        }
    }

    ifs_off.close();
    ifs_pages.close();
}

// Treats the query itself as a tiny document and returns its normalized
// TF-IDF vector.
//
// FIX: the vector must be aligned, slot for slot, with the per-page weight
// vectors built by executeQuery(), which are indexed by position in `words`.
// The original iterated an unordered_map, so the component order (and even
// the dimension, when the query contains duplicate words) did not match,
// and cosinesimilarCompute() compared unrelated components.
vector<double> WebPageQuery::getQueryWordsWeightVector(vector<string> &words) {
    // Term frequency of each distinct query word.
    unordered_map<string, int> tf;
    for (const auto &word : words) {
        tf[word]++;
    }

    // Flat IDF factor shared by every word (no corpus statistics available
    // here, so it only scales the vector). FIX: the original used log(0.5),
    // which is negative — it flipped the sign of every component and thereby
    // inverted the descending similarity ranking.
    const double idf = log(2.0);

    // One TF-IDF component per query-word position, in query order.
    vector<double> weights;
    weights.reserve(words.size());
    double norm = 0.0;
    for (const auto &word : words) {
        const double w = tf[word] * idf;
        weights.push_back(w);
        norm += w * w; // accumulate squared magnitude for normalization
    }

    // Normalize to unit length. FIX: guard the empty-query case (norm == 0)
    // to avoid dividing by zero.
    norm = sqrt(norm);
    if (norm > 0.0) {
        for (auto &w : weights) {
            w /= norm;
        }
    }

    return weights;
}

bool WebPageQuery::executeQuery(const vector<string> &query_words, vector<std::pair<int, vector<double>>> &web_vec) {
    // 临时存储结果，存储每个网页的ID和该网页的所有词的权重
    unordered_map<int, vector<double>> webpage_weights;

    // 存储所有符合条件的网页ID的交集
    set<int> result_webpages;

    // 遍历查询词
    for (size_t i = 0; i < query_words.size(); ++i) {
        const string &word = query_words[i];

        // 检查单词是否存在于倒排索引表中
        if (_invertIndexTable.find(word) == _invertIndexTable.end()) {
            return false; // 如果找不到，直接返回false
        }

        // 获取该单词对应的网页和权重
        const set<std::pair<int, double>> &result_webpages_for_word = _invertIndexTable[word];

        // 临时存储当前词存在的网页ID
        set<int> current_word_webpages;
        for (const auto &entry : result_webpages_for_word) {
            current_word_webpages.insert(entry.first);
        }

        // 如果是第一个查询词，初始化结果网页集合
        if (result_webpages.empty()) {
            result_webpages = current_word_webpages;
        } else {
            // 对后续查询词，取交集
            set<int> temp_intersection;
            set_intersection(result_webpages.begin(), result_webpages.end(),
                             current_word_webpages.begin(), current_word_webpages.end(),
                             inserter(temp_intersection, temp_intersection.begin()));
            result_webpages = temp_intersection;
        }
    }

    // 如果没有符合条件的网页，直接返回false
    if (result_webpages.empty()) {
        return false;
    }

    // 汇总所有交集中的网页及其对应的权重
    for (const int webpage_id : result_webpages) {
        vector<double> weights(query_words.size(), 0.0); // 初始化网页的权重向量

        // 对每个查询词，查找该网页的权重
        for (size_t i = 0; i < query_words.size(); ++i) {
            const string &word = query_words[i];

            // 查找该网页在当前词中的权重
            if (_invertIndexTable.find(word) != _invertIndexTable.end()) {
                const auto &word_results = _invertIndexTable[word];
                for (const auto &entry : word_results) {
                    if (entry.first == webpage_id) {
                        weights[i] = entry.second; // 保存该词在网页中的权重
                        break;
                    }
                }
            }
        }

        // 将该网页ID和权重向量添加到结果中
        web_vec.push_back({webpage_id, weights});
    }

    return true;
}

// Serializes the ranked result pages (already ordered by descending
// similarity) into a JSON array and wraps it in a complete HTTP/1.1
// response with CORS headers.
string WebPageQuery::createJson(vector<std::pair<int, double>> &web_id) {
    nlohmann::json json;
    for (const auto &web : web_id) {
        // Look each page up once instead of four separate operator[] calls.
        WebPage &page = _pageLib[web.first];
        json.push_back({{"id", page.getDocid()},
                        {"title", page.getTitle()},
                        {"url", page.getUrl()},
                        {"summary", page.getSummary()}});
    }

    string json_str = json.dump();
    std::cout << "json创建成功\n";

    // Assemble the HTTP response: status line, headers, blank line, body.
    std::string response = "HTTP/1.1 200 OK\r\n";
    response += "Content-Type: application/json\r\n";
    response += "Content-Length: " + std::to_string(json_str.size()) + "\r\n";

    // CORS headers so browsers may call this endpoint cross-origin.
    response += "Access-Control-Allow-Origin: *\r\n";                            // allow any origin
    response += "Access-Control-Allow-Methods: GET, POST, OPTIONS\r\n";          // allowed HTTP methods
    response += "Access-Control-Allow-Headers: Content-Type, Authorization\r\n"; // allowed request headers

    response += "\r\n"; // blank line: end of headers
    response += json_str;
    // FIX: no trailing CRLF after the body. Content-Length already frames
    // the body exactly; the extra two bytes the original appended would
    // desynchronize a keep-alive connection's next response.

    return response;
}

// Fallback reply when no page matched every query word.
// FIX: removed the dead `nlohmann::json json;` local the original
// constructed and never read.
// NOTE(review): this returns an empty string, not a full HTTP response
// like createJson() does — confirm the front end copes with an empty
// reply, or consider returning a well-formed "no results" response.
string WebPageQuery::returnNoAnswer() {
    std::cout << "json创建成功\n";
    return "";
}

// Ranks candidate pages by cosine similarity against the query vector.
//
// @param web_vec  (docid, per-word weight vector) pairs for each candidate.
// @param base     the query's normalized TF-IDF vector (taken by value to
//                 match the forward declaration above).
// @return (docid, similarity) pairs sorted by similarity, descending.
//
// FIXES vs. the original:
//  - a zero-length vector produced 0/0 = NaN, which poisons std::sort's
//    strict-weak ordering; such pages now get similarity 0.0;
//  - the dot-product loop indexed webpage weights with base's size, reading
//    out of bounds when the page vector was shorter; it now iterates over
//    the shorter of the two.
std::vector<std::pair<int, double>> cosinesimilarCompute(std::vector<std::pair<int, std::vector<double>>> &web_vec, std::vector<double> base) {
    std::vector<std::pair<int, double>> similarities;
    similarities.reserve(web_vec.size());

    for (const auto &webpage : web_vec) {
        const int webpage_id = webpage.first;
        const std::vector<double> &weights = webpage.second;

        // Guard against a dimension mismatch between the two vectors.
        const std::size_t dim = std::min(base.size(), weights.size());

        // CosineSimilarity = (A · B) / (||A|| * ||B||)
        double dot_product = 0.0;
        double norm_page = 0.0;
        double norm_query = 0.0;
        for (std::size_t i = 0; i < dim; ++i) {
            dot_product += weights[i] * base[i];
            norm_page += weights[i] * weights[i];
            norm_query += base[i] * base[i];
        }

        // A zero vector has no direction: define its similarity as 0.0
        // instead of NaN.
        const double denom = std::sqrt(norm_page) * std::sqrt(norm_query);
        const double cosine = (denom > 0.0) ? dot_product / denom : 0.0;

        similarities.push_back({webpage_id, cosine});
    }

    // Most similar first.
    std::sort(similarities.begin(), similarities.end(),
              [](const std::pair<int, double> &a, const std::pair<int, double> &b) {
                  return a.second > b.second;
              });

    return similarities;
}