#define DICT "corpus/dict_cn.dat"
#define INDEX "corpus/index_cn.dat"
#define STOPWORDS "corpus/stopwords/cn_stopwords.txt"
#define INVERTED_INDEX "corpus/inverted_index.dat"
#define OFFSET "corpus/page_offset.dat"
#define PAGE_LIBRARY "corpus/page_library.dat"

#include "QueryEngine.h" // 包含改造后的头文件
#include "TextUtils.h"
#include "LogUtils.h"
#include <iostream>
#include <vector>
#include <set>
#include <fstream>
#include <sstream>
#include <queue>
#include <algorithm> // 添加 std::min 的头文件
#include "LogUtils.h"
#include <nlohmann/json.hpp>
#include <cppjieba/Jieba.hpp>
#include <utfcpp/utf8.h>

using std::cout;
using std::endl;
using std::cerr;
using std::vector;
using std::set;
using std::list;
using std::multimap;
using std::ifstream;
using std::istringstream;
using std::stringstream;
using std::priority_queue;
using std::ostringstream;
using nlohmann::json;

// Thread-safe accessor for the process-wide QueryEngine singleton.
QueryEngine& QueryEngine::getInstance() {
    // Function-local static (Meyers singleton): since C++11 the language
    // guarantees thread-safe initialization, so the QueryEngine
    // constructor runs exactly once even under concurrent first calls.
    static QueryEngine instance; 
    return instance;
}

// Private constructor: loads every corpus file (dictionary, character
// index, stop words, inverted index, page offsets) into memory.
// Runs exactly once via getInstance(); exits the process when a
// mandatory corpus file cannot be opened.
//
// Malformed lines are now skipped with a warning instead of letting an
// uncaught std::stoi/std::stod exception terminate startup — the same
// defensive style the page_offset loader already used.
QueryEngine::QueryEngine() {
    LOG_INFO << "[QueryEngine] Loading dict.dat...";
    ifstream ifs_dict{ DICT };
    if(!ifs_dict.is_open()){
        LOG_ERROR << "DICT open failed, please check out!";
        exit(-1); 
    }
    string line;
    // Each line: "<word> <frequency>".
    while(std::getline(ifs_dict,line)){
        auto pos = line.find(" ");
        if(pos == string::npos){
            // No separator: skip instead of feeding garbage to stoi.
            continue;
        }
        string word = line.substr(0,pos);
        string frequency = line.substr(pos+1);
        try{
            m_dict.push_back(std::make_pair(word,std::stoi(frequency)));
        }catch(const std::exception &e){
            cerr << "Skipping malformed dict line: \"" << line << "\". Details: " << e.what() << endl;
        }
    }
    ifs_dict.close();

    LOG_INFO << "[QueryEngine] Loading index.dat...";
    ifstream ifs_index{ INDEX };
    if(!ifs_index.is_open()){
        LOG_ERROR << "INDEX open failed, please check out!";
        exit(-1);
    }
    // Each line: "<character> <dictIdx> <dictIdx> ...".
    // NOTE(review): the indices appear to be 1-based positions into
    // m_dict (see KeyRecommander's m_dict[idx-1]) — confirm with the
    // corpus generator.
    while(std::getline(ifs_index,line)){
        auto pos = line.find(" ");
        if(pos == string::npos){
            continue;
        }
        string word = line.substr(0,pos);
        string idxLine = line.substr(pos+1);
        istringstream iss{idxLine};
        string idx;
        while(iss >> idx){
            try{
                m_index[word].insert(std::stoi(idx));
            }catch(const std::exception &e){
                cerr << "Skipping malformed index entry: \"" << idx << "\" in line: \"" << line << "\"" << endl;
            }
        }
    }
    ifs_index.close(); // the original leaked this handle until scope exit

    LOG_INFO << "[QueryEngine] Generating stopWords...";
    generate_stopwords(STOPWORDS,m_stopWords);

    LOG_INFO << "[QueryEngine] Loading inverted index...";
    ifstream ifs_inverted_index{INVERTED_INDEX};
    if(!ifs_inverted_index.is_open()){
        cerr << "inverted index open failed, please check out!" << endl;
        exit(-1);
    }
    // Each line: "<word> <docId> <weight> <docId> <weight> ...".
    while(std::getline(ifs_inverted_index,line)){
        auto pos = line.find(" ");
        if(pos == string::npos){
            continue;
        }
        string word = line.substr(0,pos);
        string keyWordInfo = line.substr(pos+1);
        istringstream iss{keyWordInfo};
        string id;
        string frequency;
        while(iss >> id >> frequency){
            try{
                m_invertedIndex[word][std::stoi(id)] = std::stod(frequency);
            }catch(const std::exception &e){
                cerr << "Skipping malformed inverted-index pair: \"" << id
                     << " " << frequency << "\" in line: \"" << line << "\"" << endl;
            }
        }
    }
    ifs_inverted_index.close();

    // Derive the total number of documents as the largest document id
    // seen in any posting list. The inner map is ordered by id, so
    // rbegin() is its maximum.
    // NOTE(review): assumes document ids are dense and start at 1 —
    // confirm with the corpus generator.
    for(auto &outer: m_invertedIndex){
        if(!outer.second.empty()){
            m_N = std::max({m_N,outer.second.rbegin()->first});
        }
    }

    LOG_INFO << "[QueryEngine] Loading page_offset.dat...";
    ifstream ifs_offset { OFFSET };
    if(!ifs_offset.is_open()){
        cerr << "OFFSET open failed, please check out!" << endl;
        exit(-1); 
    }

    // Each line: "<docId> <startPos> <length>".
    while (std::getline(ifs_offset, line)){
        // Ignore blank lines.
        if (line.empty()){
            continue;
        }

        stringstream ss(line);
        string id_str;
        string start_pos_str;
        string length_str;

        // operator>> splits the three whitespace-separated fields.
        if (ss >> id_str >> start_pos_str >> length_str){
            try{
                streamoff start_pos = string_to_streamoff(start_pos_str);
                streamoff length = string_to_streamoff(length_str);

                m_offset[id_str].push_back(start_pos);
                m_offset[id_str].push_back(length);
            }catch (const std::runtime_error &e){
                cerr << "Error converting offset values in line: \"" << line << "\". Details: " << e.what() << std::endl;
                // Skip this line and carry on with the next one.
            }
        }else{
            // Fewer than three fields on the line: reject it.
            cerr << "Warning: Skipping malformed line (expected 3 fields, found fewer): \"" << line << "\"" << std::endl;
        }
    }
    ifs_offset.close();

    LOG_INFO << "[QueryEngine] Corpus read finished.";
    LOG_INFO << "[QueryEngine] Instance created.";
}

//计算两个序列间的编辑距离
template<typename Sequence>
int QueryEngine::editDistance(Sequence seq1, Sequence seq2){
    int m = seq1.size();
    int n = seq2.size();
    vector<vector<int>> dp(m+1,vector<int>(n+1));
    for(int i=0; i <= m; ++i){
        dp[i][0] = i;
    }
    for(int j=0; j<=n; ++j){
        dp[0][j] = j;
    }
    for(int i=1; i<=m; ++i){
        for(int j=1; j<=n; ++j){
            int insert = dp[i-1][j] + 1;
            int remove = dp[i][j-1] + 1;
            int replace = dp[i-1][j-1] + (seq1[i-1] != seq2[j-1]);
            // 注意：std::min 的三个参数需要包含 <algorithm> 头文件
            dp[i][j] = std::min({insert, remove, replace}); 
        }
    }
    return dp[m][n];
}

// Builds a JSON array of up to five recommended words for `keyword`,
// ranked by edit distance (ascending), then frequency (descending),
// then lexicographic order.
json QueryEngine::KeyRecommander(const string& keyword){
    // Split the keyword into individual Chinese characters.
    set<string> cVec;
    auto it = keyword.begin();
    while(it != keyword.end()){
        auto start = it;
        char32_t cp = utf8::next(it,keyword.end()); 
        if(isChineseChar(cp)){
            cVec.insert(string{start, it});
        }
    }

    // Union of the candidate words of every character, keyed by word,
    // mapped to the word's corpus frequency.
    map<string, int> candidate;
    for(const auto &ch: cVec){
        // Single lookup instead of the original find() + operator[] pair.
        auto idxIt = m_index.find(ch);
        if(idxIt == m_index.end()){
            cerr << "can not find the charactor!" << endl;
            continue;
        }
        for(const auto &idx: idxIt->second){
            // m_index stores 1-based positions into m_dict; skip anything
            // outside that range instead of indexing out of bounds.
            if(idx < 1 || static_cast<size_t>(idx) > m_dict.size()){
                continue;
            }
            candidate[m_dict[idx-1].first] = m_dict[idx-1].second;
        }
    }

    // "Less-than" for the priority queue; top() is the best match.
    // Takes const references — the original copied each Pri per compare.
    auto cmp = [](const Pri &task1, const Pri &task2){
        // Larger edit distance ranks lower.
        if(task1.length != task2.length){
            return task1.length > task2.length;
        }
        // Smaller frequency ranks lower.
        if(task1.frequency != task2.frequency){
            return task1.frequency < task2.frequency;
        }
        // Lexicographically smaller word ranks higher.
        return task1.word > task2.word;
    };

    priority_queue<Pri,vector<Pri>,decltype(cmp)> pq(cmp);
    for(const auto &[word, amount]: candidate){
        Pri unit;
        // NOTE: editDistance works on std::string, i.e. on UTF-8 bytes,
        // not on characters.
        unit.length = editDistance(word, keyword);
        unit.word = word;
        unit.frequency = amount;
        pq.push(unit);
    }

    // Collect up to the five best candidates (queue may hold fewer).
    json res = json::array();
    for(int i=0; i<5 && !pq.empty(); ++i){ 
        res.push_back(pq.top().word);
        pq.pop();
    }

    LOG_INFO << "[KeyRecommanderResult] " << res.dump();
    return res;
}

// Returns the ids of documents that contain every word in queryWords,
// in ascending order.
//
// The per-word id lists are produced by iterating std::map keys, so they
// are already sorted — the precondition std::set_intersection requires.
// Returns an empty vector when queryWords is empty or when any word has
// no posting list. All standard-library calls are now explicitly
// qualified (the original relied on ADL to find set_intersection and
// back_inserter).
std::vector<int> findDocsWithAllKeywords(const std::vector<std::string>& queryWords,
                                         const std::map<std::string, std::map<int, double>>& invertedIndex) {
    std::vector<int> result;

    if (queryWords.empty()) return result;

    // Seed the running intersection with the first word's documents.
    auto it = invertedIndex.find(queryWords[0]);
    if (it == invertedIndex.end()) return result; // first word unknown
    result.reserve(it->second.size());
    for (const auto& [docId, weight] : it->second) {
        result.push_back(docId);
    }

    // Intersect with each remaining word's document list.
    for (size_t i = 1; i < queryWords.size(); ++i) {
        auto it2 = invertedIndex.find(queryWords[i]);
        if (it2 == invertedIndex.end()) {
            result.clear(); // some word appears nowhere -> no common docs
            return result;
        }

        std::vector<int> docs;
        docs.reserve(it2->second.size());
        for (const auto& [docId, weight] : it2->second) {
            docs.push_back(docId);
        }

        // Both ranges are sorted (map iteration order), as required.
        std::vector<int> temp;
        std::set_intersection(result.begin(), result.end(),
                              docs.begin(), docs.end(),
                              std::back_inserter(temp));
        result.swap(temp);
    }

    return result;
}

// Cosine similarity between two weight vectors.
//
// Fixes vs. the original:
//  - vectors are taken by const reference instead of being copied;
//  - the zero-denominator guard (previously commented out) is active, so
//    a zero/vanishing vector yields 0.0 instead of NaN or infinity;
//  - only the overlapping prefix is traversed, so a shorter targetVec can
//    no longer cause an out-of-bounds read.
double get_cos(const std::vector<double>& baseVec, const std::vector<double>& targetVec){
    double multiply = 0.0;        // dot product of the two vectors
    double modular_base = 0.0;    // squared magnitude of baseVec
    double modular_target = 0.0;  // squared magnitude of targetVec
    const size_t n = baseVec.size() < targetVec.size() ? baseVec.size() : targetVec.size();
    for (size_t i = 0; i < n; ++i){
        multiply += baseVec[i] * targetVec[i];
        modular_base += baseVec[i] * baseVec[i];
        modular_target += targetVec[i] * targetVec[i];
    }

    double denominator = std::sqrt(modular_base) * std::sqrt(modular_target);
    if (denominator < 1e-9){
        return 0.0; // a zero (or vanishing) vector has similarity 0
    }
    return multiply / denominator;
}

// Searches the page library for pages containing every keyword of
// `query`, ranks them by cosine similarity between the query's TF-IDF
// vector and each page's keyword-weight vector, and returns up to five
// results as a JSON array of {id, title, link, abstract} objects.
json QueryEngine::WebPageSearcher(const string& query){
    // Tokenize the raw query.
    vector<string> pendings;
    m_tokenizer.Cut(query,pendings);

    // Keyword -> number of occurrences in the query (stop words dropped).
    map<string,int> keyWordsInfo;
    for(const auto &word: pendings){
        if(isValidKeyWord(word,m_stopWords)){
            ++keyWordsInfo[word];
        }
    }
    // FIX: the original dereferenced keyWordsInfo.begin() even when the
    // query produced no valid keywords — undefined behaviour.
    if(keyWordsInfo.empty()){
        return json::array();
    }

    // TF-IDF weight vector for the query keywords (map iteration order).
    vector<double> keyWordsWeightVec;
    double sum_of_square = 0;
    for(const auto &[keyword,frequency]: keyWordsInfo){
        double TF = static_cast<double>(frequency)/static_cast<double>(pendings.size());
        // find() instead of operator[] so lookups don't insert empty
        // posting lists into m_invertedIndex as a side effect.
        auto postingIt = m_invertedIndex.find(keyword);
        int DF = (postingIt == m_invertedIndex.end()
                      ? 0
                      : static_cast<int>(postingIt->second.size())) + 1;
        // FIX: (m_N+1)/DF was integer division, truncating the ratio
        // before the logarithm.
        double IDF = log(static_cast<double>(m_N + 1) / static_cast<double>(DF));
        double weight = TF * IDF;
        keyWordsWeightVec.push_back(weight);

        sum_of_square += weight*weight;
    }

    // Keyword with the largest weight, used for the dynamic abstract.
    // FIX: the original declared maxWeight as int and never updated it
    // inside the loop, so maxIdx ended up at the LAST positive-weight
    // entry instead of the maximum.
    double maxWeight = keyWordsWeightVec[0];
    size_t maxIdx = 0;
    for(size_t i=1; i<keyWordsWeightVec.size(); ++i){
        if(keyWordsWeightVec[i] > maxWeight){
            maxWeight = keyWordsWeightVec[i];
            maxIdx = i;
        }
    }
    auto infoIt = keyWordsInfo.begin();
    while(maxIdx--){
        ++infoIt;
    }
    string mostKeyWord = infoIt->first;

    // Normalize the query vector; guard the all-zero-weight case, which
    // previously divided by zero.
    if(sum_of_square > 0){
        double norm = sqrt(sum_of_square);
        for(auto &weight: keyWordsWeightVec){
            weight = weight/norm;
        }
    }

    // Keyword set in the same order as the weight vector.
    vector<string> keyWords;
    keyWords.reserve(keyWordsInfo.size());
    for(const auto &entry: keyWordsInfo){
        keyWords.push_back(entry.first);
    }
    // Pages that contain every keyword, via the inverted index.
    vector<int> keyPages = findDocsWithAllKeywords(keyWords, m_invertedIndex);

    // No page contains all keywords: return an empty array.
    if(keyPages.empty()){
        return json::array();
    }

    // Cosine similarity of every candidate page: map<cosine, id>.
    multimap<double,int> cosMap;
    for(const auto &id: keyPages){
        vector<double> tempWeightVec; // the page's weight per keyword
        tempWeightVec.reserve(keyWords.size());
        for(const auto &key: keyWords){
            tempWeightVec.push_back(m_invertedIndex[key][id]);
        }
        cosMap.insert({get_cos(keyWordsWeightVec, tempWeightVec), id});
    }

    // Walk cosMap in descending similarity and build up to five results.
    json res = json::array();
    ifstream ifs{PAGE_LIBRARY};
    int idx = 0;
    int pages = cosMap.size() < 5 ? static_cast<int>(cosMap.size()) : 5;
    for(auto it = cosMap.rbegin(); idx<pages && it != cosMap.rend(); ++it){
        string id = std::to_string(it->second);

        // Skip pages with missing/incomplete offset records instead of
        // indexing into a default-constructed empty vector (UB).
        auto offIt = m_offset.find(id);
        if(offIt == m_offset.end() || offIt->second.size() < 2){
            cerr << "Missing offset info for page " << id << endl;
            continue;
        }
        auto start = offIt->second[0];   // byte offset of the page
        auto length = offIt->second[1];  // byte length of the page

        auto doc = get_document(ifs,start,length);

        res.push_back(json::object());
        res[idx]["id"] = doc.id;
        res[idx]["title"] = doc.title;
        res[idx]["link"] = doc.link;

        // Dynamic abstract: up to 50 UTF-8 characters of text around the
        // highest-weight keyword.
        string abstract;
        auto posIndex = doc.content.find(mostKeyWord); // byte offset
        if(posIndex == string::npos){
            // FIX: the original added npos to the base pointer when the
            // keyword was absent, producing a far out-of-range pointer.
            posIndex = 0;
        }
        const char *begin = doc.content.c_str();
        const char *posIt = begin + posIndex;
        // FIX: the original computed the end as posIt + content.size(),
        // overrunning the buffer by posIndex bytes.
        const char *posEnd = begin + doc.content.size();
        // Back up at most 10 characters (not bytes) of leading context.
        for (size_t k = 0; k < 10 && posIt != begin; ++k) {
            utf8::prior(posIt, begin);
        }
        int count = 0;
        while(count < 50 && posIt != posEnd){
            auto charStart = posIt;
            utf8::next(posIt, posEnd);
            abstract += string{charStart, posIt};
            ++count;
        }

        res[idx]["abstract"] = abstract;

        ++idx;
    }
    ifs.close();

    LOG_INFO << "[WebPageSearcherResult] [Found " << std::to_string(pages) << " pages]";

    return res;
}

// Converts a decimal string into a std::streamoff, reporting every kind
// of failure uniformly as std::runtime_error so callers only need a
// single catch clause.
streamoff QueryEngine::string_to_streamoff(const std::string& str_offset) {
    // An empty string can never be a valid offset — reject it up front.
    if (str_offset.empty()) {
        throw std::runtime_error("Conversion failed: Input string is empty.");
    }

    try {
        // std::stoll parses into long long, which is wide enough for
        // streamoff on all common platforms; cast through explicitly.
        return static_cast<std::streamoff>(std::stoll(str_offset));
    } catch (const std::invalid_argument&) {
        // The string does not begin with a valid integer (e.g. "abc").
        throw std::runtime_error("Conversion failed: Invalid format (not a number). String: " + str_offset);
    } catch (const std::out_of_range&) {
        // The value does not fit in long long.
        throw std::runtime_error("Conversion failed: Number out of range for streamoff. String: " + str_offset);
    } catch (const std::exception& e) {
        // Anything else unexpected.
        throw std::runtime_error("Conversion failed: An unexpected error occurred. " + std::string(e.what()));
    }
}
