#include "../include/WebPageSearcher.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <cmath>
#include <algorithm>
#include <cppjieba/Jieba.hpp>

// Constructs the searcher and eagerly loads all on-disk data:
// the inverted index (which also sets _totalDocuments), the stop-word
// list, and the web-page store. Load order matters only for log output;
// each loader is independent of the others.
WebPageSearcher::WebPageSearcher() : _totalDocuments(0), _indexLoaded(false)
{
    std::cout << "创建网页搜索器" << std::endl;
    loadInvertedIndex();
    loadStopWords();
    loadPages();
}

// Loads the on-disk inverted index into _invertedIndex.
// File format: one term per line, followed by whitespace-separated
// (doc_id, weight) pairs. Also derives _totalDocuments from the largest
// document id encountered, and sets _indexLoaded on success.
void WebPageSearcher::loadInvertedIndex()
{
    std::cout << "📚 加载倒排索引库..." << std::endl;

    std::ifstream index_file("../output/inverted_index.dat");
    if (!index_file.is_open())
    {
        std::cerr << "❌ 无法打开倒排索引文件" << std::endl;
        return;
    }

    int term_count = 0;
    for (std::string record; std::getline(index_file, record);)
    {
        std::istringstream fields(record);
        std::string term;
        fields >> term;

        int id;
        double w;
        // Consume the posting list: alternating doc id / weight tokens.
        while (fields >> id >> w)
        {
            _invertedIndex[term][id] = w;
            _totalDocuments = std::max(_totalDocuments, id);
        }

        ++term_count;
        if (term_count % 1000 == 0)
        {
            std::cout << "  已加载 " << term_count << " 个词汇..." << std::endl;
        }
    }

    index_file.close();
    _indexLoaded = true;
    std::cout << "✅ 倒排索引加载完成: " << term_count << " 个词汇, "
              << _totalDocuments << " 个文档" << std::endl;
}

// Loads the stop-word list into _stopWords, one word per line.
// Bug fix: trailing carriage returns are now stripped, so a
// CRLF-encoded file (e.g. edited on Windows) no longer produces
// "word\r" entries that would never match any token during the
// stop-word filtering in tokenizeQuery.
void WebPageSearcher::loadStopWords()
{
    std::cout << "📝 加载停用词..." << std::endl;

    std::ifstream file("../data/cn_stopwords.txt");
    if (!file.is_open())
    {
        std::cerr << "❌ 无法打开停用词文件" << std::endl;
        return;
    }

    std::string word;
    while (std::getline(file, word))
    {
        // Strip a '\r' left over from CRLF line endings.
        if (!word.empty() && word.back() == '\r')
        {
            word.pop_back();
        }
        if (!word.empty())
        {
            _stopWords.insert(word);
        }
    }

    file.close();
    std::cout << "✅ 停用词加载完成: " << _stopWords.size() << " 个" << std::endl;
}

// Loads the web-page store: reads per-document (offset, size) records
// from offsets.dat, then extracts each page's <title>/<link>/<content>
// fields from the binary pages.dat and caches them in _pages.
// Bug fix: the result of pages_file.read() is now checked — a corrupt
// offset/size record previously parsed a half-filled buffer and left the
// stream in a failed state, silently breaking every subsequent page.
void WebPageSearcher::loadPages()
{
    std::cout << "📄 加载网页偏移库..." << std::endl;

    std::ifstream offsets_file("../output/offsets.dat");
    if (!offsets_file.is_open())
    {
        std::cerr << "❌ 无法打开偏移库文件" << std::endl;
        return;
    }

    // Read the offset table first: doc_id -> (byte offset, byte size).
    std::map<int, std::pair<size_t, size_t>> offsets;

    int doc_id;
    size_t offset, size;
    while (offsets_file >> doc_id >> offset >> size)
    {
        offsets[doc_id] = {offset, size};
    }
    offsets_file.close();

    // Open the page store itself.
    std::ifstream pages_file("../output/pages.dat", std::ios::binary);
    if (!pages_file.is_open())
    {
        std::cerr << "❌ 无法打开网页库文件" << std::endl;
        return;
    }

    // Returns the text between the first <open>...</close> pair, or ""
    // when either tag is missing or the close tag precedes the open tag.
    auto extract_tag = [](const std::string &doc, const std::string &open,
                          const std::string &close) -> std::string
    {
        size_t begin = doc.find(open);
        size_t end = doc.find(close);
        if (begin == std::string::npos || end == std::string::npos)
        {
            return "";
        }
        begin += open.size();
        if (end < begin)
        {
            return ""; // malformed: close tag before the open tag's payload
        }
        return doc.substr(begin, end - begin);
    };

    int loaded_count = 0;
    for (const auto &[id, pos] : offsets)
    {
        pages_file.seekg(pos.first);
        std::string content(pos.second, '\0');
        pages_file.read(&content[0], pos.second);
        if (!pages_file)
        {
            // Bad offset/size record — skip this page rather than parse a
            // partially-filled buffer, and clear the stream for the rest.
            std::cerr << "⚠️  读取网页失败: doc_id=" << id << std::endl;
            pages_file.clear();
            continue;
        }

        _pages[id] = {extract_tag(content, "<title>", "</title>"),
                      extract_tag(content, "<link>", "</link>"),
                      extract_tag(content, "<content>", "</content>")};
        loaded_count++;

        if (loaded_count % 100 == 0)
        {
            std::cout << "  已加载 " << loaded_count << " 个网页..." << std::endl;
        }
    }

    pages_file.close();
    std::cout << "✅ 网页库加载完成: " << loaded_count << " 个网页" << std::endl;
}

// Splits a query into keywords with cppjieba (HMM mode), then drops stop
// words and any token shorter than two bytes before returning the rest.
// Note: the length check is in bytes, so it removes single ASCII
// characters but keeps every multi-byte (e.g. CJK) character.
std::vector<std::string> WebPageSearcher::tokenizeQuery(const std::string &query)
{
    static cppjieba::Jieba tokenizer;

    std::vector<std::string> raw_tokens;
    tokenizer.Cut(query, raw_tokens, true);

    // Keep only tokens that are not stop words and are >= 2 bytes long.
    std::vector<std::string> keywords;
    for (const auto &tok : raw_tokens)
    {
        const bool is_stop = _stopWords.count(tok) != 0;
        if (!is_stop && tok.length() >= 2)
        {
            keywords.push_back(tok);
        }
    }

    std::cout << "🔍 查询分词结果: ";
    for (const auto &tok : keywords)
    {
        std::cout << tok << " ";
    }
    std::cout << std::endl;

    return keywords;
}

// Builds an L2-normalized TF-IDF vector for the query's keywords and
// stores it into query_vector (term -> weight).
// Terms absent from the inverted index fall back to their raw TF weight.
void WebPageSearcher::calculateQueryVector(const std::string &query, std::map<std::string, double> &query_vector)
{
    const auto keywords = tokenizeQuery(query);

    // Term-frequency counts over the tokenized query.
    std::map<std::string, int> counts;
    for (const auto &kw : keywords)
    {
        ++counts[kw];
    }

    for (const auto &[term, n] : counts)
    {
        const double tf = static_cast<double>(n) / keywords.size();

        auto it = _invertedIndex.find(term);
        if (it == _invertedIndex.end())
        {
            // Unknown term: no document frequency available, use plain TF.
            query_vector[term] = tf;
        }
        else
        {
            // IDF = log2(total documents / document frequency).
            const double idf = log2(static_cast<double>(_totalDocuments) / it->second.size());
            query_vector[term] = tf * idf;
        }
    }

    // L2-normalize the vector so weights are comparable across queries.
    double squared_sum = 0.0;
    for (const auto &[term, w] : query_vector)
    {
        squared_sum += w * w;
    }

    if (squared_sum > 0)
    {
        const double norm = sqrt(squared_sum);
        for (auto &[term, w] : query_vector)
        {
            w /= norm;
        }
    }
}

// Cosine similarity between two sparse term->weight vectors.
// Returns 0 when either vector is zero; the result is clamped to [0, 1]
// to absorb tiny floating-point overshoot.
double WebPageSearcher::cosineSimilarity(const std::map<std::string, double> &query_vec,
                                         const std::map<std::string, double> &doc_vec)
{
    double dot = 0.0;
    double q_sq = 0.0;
    double d_sq = 0.0;

    // One pass over the query vector: accumulate its squared norm and the
    // dot product with matching document terms.
    for (const auto &[term, qw] : query_vec)
    {
        q_sq += qw * qw;
        auto it = doc_vec.find(term);
        if (it != doc_vec.end())
        {
            dot += qw * it->second;
        }
    }

    // Squared norm of the document vector.
    for (const auto &[term, dw] : doc_vec)
    {
        d_sq += dw * dw;
    }

    if (q_sq == 0 || d_sq == 0)
    {
        return 0.0;
    }

    const double sim = dot / (sqrt(q_sq) * sqrt(d_sq));
    return std::max(0.0, std::min(1.0, sim));
}
// Builds a short plain-text abstract (up to ~80 characters) from page
// content, walking the bytes as UTF-8 and copying whole characters only.
// Falls back to an ASCII-only abstract when the UTF-8 pass produced
// nothing usable, and to a fixed placeholder as a last resort.
// NOTE(review): `keywords` is currently unused — abstracts are not yet
// centered on the query terms.
//
// Bug fixes:
//   1. The 2-byte UTF-8 bound was `i + 2 < length` but only one
//      continuation byte is needed (`i + 1 < length`), so a 2-byte
//      character at the very end of the content was silently dropped.
//   2. The old "don't end on half a character" loop stripped EVERY
//      trailing byte with the high bit set, which deleted complete
//      trailing CJK characters (potentially most of a Chinese abstract).
//      Since the loop below never splits a character, no repair is needed.
std::string WebPageSearcher::generateAbstract(const std::string &content, const std::vector<std::string> &keywords)
{
    if (content.empty())
    {
        return "暂无摘要";
    }

    // Method 1: take roughly the first 80 characters, respecting UTF-8
    // sequence boundaries (no complex encoding handling).
    std::string simple_abstract;
    int char_count = 0;

    for (size_t i = 0; i < content.length() && char_count < 80;)
    {
        unsigned char c = content[i];

        if (c < 32 && c != '\n' && c != '\t' && c != '\r')
        {
            // Skip control characters (newline, tab and CR are kept).
            i++;
            continue;
        }

        if (c < 128)
        {
            // ASCII character.
            simple_abstract += c;
            i++;
            char_count++;
        }
        else if (i + 1 < content.length() && (c & 0xE0) == 0xC0)
        {
            // 2-byte UTF-8 sequence (fix 1: bound is i + 1, not i + 2).
            simple_abstract += content.substr(i, 2);
            i += 2;
            char_count++;
        }
        else if (i + 2 < content.length() && (c & 0xF0) == 0xE0)
        {
            // 3-byte UTF-8 sequence (CJK etc.).
            simple_abstract += content.substr(i, 3);
            i += 3;
            char_count++;
        }
        else if (i + 3 < content.length() && (c & 0xF8) == 0xF0)
        {
            // 4-byte UTF-8 sequence.
            simple_abstract += content.substr(i, 4);
            i += 4;
            char_count++;
        }
        else
        {
            // Unrecognized or invalid byte — skip it.
            i++;
        }
    }

    // Append an ellipsis only when the character limit truncated the
    // content (fix 2: no destructive trailing-byte stripping — the loop
    // above only ever appends complete UTF-8 characters).
    if (char_count >= 80)
    {
        simple_abstract += "...";
    }

    // If the simple method failed, fall back to an ultra-simple one.
    if (simple_abstract.empty() || simple_abstract.find("��") != std::string::npos)
    {
        // Method 2: keep only printable ASCII characters.
        std::string ascii_abstract;
        for (char c : content)
        {
            if (c >= 32 && c <= 126)
            { // printable ASCII character
                ascii_abstract += c;
            }
            else if (c == '\n' || c == '\t')
            {
                ascii_abstract += ' '; // newline and tab become spaces
            }
            if (ascii_abstract.length() >= 60)
                break;
        }

        if (!ascii_abstract.empty())
        {
            if (ascii_abstract.length() >= 60)
            {
                ascii_abstract = ascii_abstract.substr(0, 57) + "...";
            }
            return ascii_abstract;
        }

        // Method 3: last resort placeholder.
        return "摘要内容（编码处理中）";
    }

    return simple_abstract;
}

// Returns the title, link and a naive ~50-byte abstract for a document
// id; unknown ids yield a placeholder result.
// Bug fixes: a single map lookup replaces the find + operator[] pair,
// and the 50-byte cut is trimmed back to a UTF-8 character boundary so
// a CJK character is never split in half (which produced mojibake).
WebPageResult WebPageSearcher::getWebPageById(int doc_id)
{
    WebPageResult result;
    result.doc_id = doc_id;

    auto it = _pages.find(doc_id);
    if (it != _pages.end())
    {
        const auto &[title, link, content] = it->second;
        result.title = title;
        result.link = link;

        // Rough abstract: first 50 bytes, then repair a possibly split
        // trailing UTF-8 sequence.
        std::string snippet = content.substr(0, 50);
        size_t last = snippet.size();
        while (last > 0 && (static_cast<unsigned char>(snippet[last - 1]) & 0xC0) == 0x80)
        {
            --last; // step back over continuation bytes to the lead byte
        }
        if (last > 0)
        {
            unsigned char lead = snippet[last - 1];
            // Expected sequence length for this lead byte.
            size_t expect = (lead < 0x80) ? 1
                            : ((lead & 0xE0) == 0xC0) ? 2
                            : ((lead & 0xF0) == 0xE0) ? 3
                            : ((lead & 0xF8) == 0xF0) ? 4
                                                      : 1;
            if (snippet.size() - (last - 1) < expect)
            {
                snippet.resize(last - 1); // drop the incomplete character
            }
        }
        result.abstract = snippet + "..."; // simple abstract
    }
    else
    {
        result.title = "未知网页";
        result.link = "";
        result.abstract = "内容无法获取";
    }

    return result;
}

// Executes a search: tokenizes the query into a TF-IDF vector, selects
// candidate documents (strict AND-match first, falling back to loose
// OR-match), ranks them by cosine similarity, and fills `results` with
// up to 10 entries (title, link, generated abstract, score).
//
// Bug fixes:
//   1. The scoring loop used `_invertedIndex[word]`, whose operator[]
//      default-inserts an empty posting list for every query term not in
//      the index — a read-only query permanently polluted the index.
//      The loop now uses find().
//   2. Strict matching: when a term missing from the index preceded the
//      present terms, `first_word` was still true, so the next present
//      term repopulated strict_match_docs and "contains ALL keywords"
//      was violated. A separate all_terms_indexed flag fixes this.
void WebPageSearcher::doQuery(const std::string &query, std::vector<WebPageResult> &results)
{
    std::cout << "🔍 执行网页搜索: " << query << std::endl;
    results.clear();

    if (!_indexLoaded)
    {
        std::cout << "❌ 倒排索引未加载，无法搜索" << std::endl;
        return;
    }

    // 1. Build the normalized TF-IDF query vector.
    std::map<std::string, double> query_vector;
    calculateQueryVector(query, query_vector);

    if (query_vector.empty())
    {
        std::cout << "⚠️  查询无有效关键词: '" << query << "'" << std::endl;
        return;
    }

    std::cout << "📊 查询分词结果: ";
    for (const auto &[word, weight] : query_vector)
    {
        std::cout << word << " ";
    }
    std::cout << std::endl;

    // 2. Collect candidate documents.
    std::set<int> candidate_docs;
    std::set<int> strict_match_docs; // contain ALL query terms
    std::set<int> loose_match_docs;  // contain ANY query term

    bool first_word = true;
    bool all_terms_indexed = true; // fix 2: any missing term voids strict match

    for (const auto &[word, weight] : query_vector)
    {
        auto entry = _invertedIndex.find(word);
        if (entry == _invertedIndex.end())
        {
            // Term unknown to the index: no document can contain ALL terms.
            all_terms_indexed = false;
            continue;
        }

        std::set<int> word_docs;
        for (const auto &[doc_id, doc_weight] : entry->second)
        {
            word_docs.insert(doc_id);
            loose_match_docs.insert(doc_id); // loose match: any term suffices
        }

        if (first_word)
        {
            strict_match_docs = word_docs;
            first_word = false;
        }
        else
        {
            // Intersect: keep only documents containing every term so far.
            std::set<int> intersection;
            std::set_intersection(
                strict_match_docs.begin(), strict_match_docs.end(),
                word_docs.begin(), word_docs.end(),
                std::inserter(intersection, intersection.begin()));
            strict_match_docs = intersection;
        }
    }

    if (!all_terms_indexed)
    {
        strict_match_docs.clear();
    }

    std::cout << "🎯 严格匹配文档数: " << strict_match_docs.size() << std::endl;
    std::cout << "🎯 宽松匹配文档数: " << loose_match_docs.size() << std::endl;

    // Prefer strict matches; fall back to loose matches.
    if (!strict_match_docs.empty())
    {
        candidate_docs = strict_match_docs;
        std::cout << "✅ 使用严格匹配结果" << std::endl;
    }
    else if (!loose_match_docs.empty())
    {
        candidate_docs = loose_match_docs;
        std::cout << "🔄 使用宽松匹配结果" << std::endl;
    }
    else
    {
        std::cout << "❌ 没有找到相关文档" << std::endl;
        return;
    }

    // 3. Score candidates by cosine similarity and sort descending.
    std::vector<std::pair<int, double>> scored_docs;

    for (int doc_id : candidate_docs)
    {
        // Build the document's vector restricted to the query terms.
        std::map<std::string, double> doc_vector;
        for (const auto &[word, query_weight] : query_vector)
        {
            auto entry = _invertedIndex.find(word); // fix 1: no operator[]
            if (entry != _invertedIndex.end())
            {
                auto dw = entry->second.find(doc_id);
                if (dw != entry->second.end())
                {
                    doc_vector[word] = dw->second;
                }
            }
        }

        double similarity = cosineSimilarity(query_vector, doc_vector);
        scored_docs.emplace_back(doc_id, similarity);
    }

    std::sort(scored_docs.begin(), scored_docs.end(),
              [](const auto &a, const auto &b)
              { return a.second > b.second; });

    std::cout << "📈 相似度范围: ";
    if (!scored_docs.empty())
    {
        std::cout << "最高=" << scored_docs[0].second
                  << ", 最低=" << scored_docs.back().second;
    }
    std::cout << std::endl;

    // 4. Materialize the top results (re-tokenizes once for abstracts).
    auto keywords = tokenizeQuery(query);
    int count = 0;
    const int max_results = 10;

    for (const auto &[doc_id, score] : scored_docs)
    {
        if (count >= max_results)
            break;

        WebPageResult result = getWebPageById(doc_id);
        result.score = score;
        result.abstract = generateAbstract(std::get<2>(_pages[doc_id]), keywords);

        results.push_back(result);
        count++;

        std::cout << "  结果 " << count << ": ID=" << doc_id
                  << " 相似度=" << score
                  << " 标题=" << result.title.substr(0, 20) << "..." << std::endl;
    }

    std::cout << "✅ 生成 " << results.size() << " 个搜索结果" << std::endl;
}