// PageProcessor.cpp - 初始框架
#include <iostream>
#include <fstream>
#include "../include/PageProcessor.h"
#include "../include/DirectoryScanner.h"
#include "../include/tinyxml2.h" // XML解析库
#include <regex>                 // 添加这个头文件
#include <cmath>
#include <sstream>
#include <algorithm>
#include <iomanip>
#include <climits>
#include "utfcpp/utf8.h"
using std::cout;
using std::endl;
using std::fstream;
using std::ifstream;
using std::string;
using namespace tinyxml2;
// 1. Constructor: loads the stop-word list and reports the initial state.
//    (All other containers start empty; documents/index are built later by process().)
PageProcessor::PageProcessor() : m_tokenizer(), m_hasher(), m_stopWords(),
                                 m_documents(), m_invertedIndex()
{
    cout << "pageprocessor初始化" << endl;
    // Load stop words; the path is relative to the project root (v1 layout).
    load_stopwords("data/cn_stopwords.txt", m_stopWords);
    // Report the freshly initialised state.
    cout << "停用词数量: " << m_stopWords.size() << endl;
    cout << "文档数量: " << m_documents.size() << endl;
    cout << "倒排索引大小: " << m_invertedIndex.size() << endl;
    cout << "=== PageProcessor初始化完成 ===" << endl;
} // NOTE: removed the stray ';' that followed the function body.

// 2. Main pipeline entry point.
//    Stages run in order: extract raw docs -> deduplicate -> write the page
//    and offset stores -> build the inverted index.
void PageProcessor::process(const std::string &dir)
{
    cout << "开始处理目录: " << dir << endl;

    extract_documents(dir);
    deduplicate_documents();

    // Output locations for the generated stores.
    const std::string pages_path = "output/pages.dat";
    const std::string offsets_path = "output/offsets.dat";
    const std::string index_path = "output/inverted_index.dat";

    build_pages_and_offsets(pages_path, offsets_path);
    build_inverted_index(index_path);

    cout << "处理完成" << endl;
}

// Parses every file under `dir` as an RSS XML document and appends one
// Document per <item> (title, link, description) to m_documents.
// Items whose <description> is empty are skipped; IDs are assigned
// sequentially starting at 1 across all files.
void PageProcessor::extract_documents(const string &dir)
{
    cout << "【extract_documents】开始从XML提取文档" << endl;

    auto files = DirectoryScanner::scan(dir);
    cout << "找到 " << files.size() << " 个文件" << endl;

    if (files.empty())
    {
        cout << "警告: 目录中没有找到XML文件" << endl;
        return;
    }

    int total_docs = 0;

    for (const auto &file : files)
    {
        cout << "解析文件: " << file << endl;

        XMLDocument doc;
        if (doc.LoadFile(file.c_str()) != XML_SUCCESS)
        {
            cout << "  错误: 无法加载XML文件" << endl;
            continue;
        }

        // 1. Locate the <rss> root node.
        XMLElement *root = doc.FirstChildElement("rss");
        if (!root)
        {
            cout << "  错误: 找不到<rss>根节点" << endl;
            continue;
        }

        // 2. Locate the <channel> node under <rss>.
        XMLElement *channel = root->FirstChildElement("channel");
        if (!channel)
        {
            cout << "  错误: 找不到<channel>节点" << endl;
            continue;
        }

        // 3. Walk every <item> under <channel>.
        XMLElement *item = channel->FirstChildElement("item");
        int file_docs = 0;

        while (item)
        {
            Document document;

            // 4. Extract the document fields using standard RSS tag names.
            // GetText() returns nullptr for empty/absent elements, hence the guards.
            XMLElement *titleElem = item->FirstChildElement("title");
            if (titleElem && titleElem->GetText())
            {
                document.title = titleElem->GetText();
            }

            XMLElement *linkElem = item->FirstChildElement("link");
            if (linkElem && linkElem->GetText())
            {
                document.link = linkElem->GetText();
            }

            // In RSS the body text normally lives in <description>.
            XMLElement *contentElem = item->FirstChildElement("description");
            if (contentElem && contentElem->GetText())
            {
                document.content = contentElem->GetText();
            }

            // The feed carries no ID, so generate a sequential one.
            // (total_docs only advances for kept documents, so IDs stay dense.)
            document.id = total_docs + 1;

            // Keep the document only if it actually has body text.
            if (!document.content.empty())
            {
                m_documents.push_back(document);
                file_docs++;
                total_docs++;

                cout << "  提取文档: ID=" << document.id
                     << ", 标题=" << (document.title.empty() ? "无标题" : document.title.substr(0, 20))
                     << "..., 内容长度=" << document.content.length() << endl;
            }
            else
            {
                cout << "  跳过文档: 内容为空" << endl;
            }

            // Advance to the next <item>.
            item = item->NextSiblingElement("item");
        }

        cout << "  本文件提取了 " << file_docs << " 个文档" << endl;
    }

    cout << "【extract_documents】完成！总共提取 " << total_docs << " 个文档" << endl;
    cout << "文档向量大小: " << m_documents.size() << endl;
}
// ---------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------
void PageProcessor::deduplicate_documents()
{
    std::cout << "【deduplicate_documents】文档去重开始" << std::endl;

    if (m_documents.empty())
    {
        std::cout << "警告: 没有文档需要去重" << std::endl;
        return;
    }

    std::vector<Document> unique_documents;
    std::map<uint64_t, int> hash_to_index; // simhash值到文档索引的映射

    int duplicate_count = 0;

    for (size_t i = 0; i < m_documents.size(); ++i)
    {
        const auto &doc = m_documents[i];

        // 计算文档的simhash值
        uint64_t hash_value;
        std::vector<std::pair<std::string, double>> features;

        // 提取文档的关键特征并生成simhash
        m_hasher.extract(doc.content, features, 10); // 取前10个重要特征
        m_hasher.make(doc.content, 10, hash_value);

        // 检查是否已经存在相似的文档
        bool is_duplicate = false;
        for (const auto &[existing_hash, existing_idx] : hash_to_index)
        {
            // 计算汉明距离 - 使用位运算
            uint64_t xor_result = existing_hash ^ hash_value;
            int hamming_distance = 0;

            // 计算异或结果中1的个数（汉明距离）
            while (xor_result)
            {
                hamming_distance += xor_result & 1;
                xor_result >>= 1;
            }

            // 汉明距离小于等于3认为是重复文档
            if (hamming_distance <= 3)
            {
                is_duplicate = true;
                duplicate_count++;

                std::cout << "  发现重复文档: ID=" << doc.id
                          << " 与 ID=" << m_documents[existing_idx].id
                          << " 汉明距离=" << hamming_distance << std::endl;
                break;
            }
        }

        if (!is_duplicate)
        {
            unique_documents.push_back(doc);
            hash_to_index[hash_value] = unique_documents.size() - 1;

            std::cout << "  保留唯一文档: ID=" << doc.id
                      << ", simhash=" << hash_value << std::endl;
        }
    }

    // 更新文档集合
    m_documents = std::move(unique_documents);

    std::cout << "【deduplicate_documents】完成！"
              << "原始文档数: " << m_documents.size() + duplicate_count
              << ", 去重后: " << m_documents.size()
              << ", 移除重复: " << duplicate_count << std::endl;
}
// ---------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------

// Writes the page store (`pages`, binary) as consecutive <doc>...</doc>
// records and the offset store (`offsets`, text) as lines of
// "<doc id> <byte offset> <record size>".
void PageProcessor::build_pages_and_offsets(const std::string &pages, const std::string &offsets)
{
    std::cout << "【build_pages_and_offsets】生成网页库和偏移库" << std::endl;

    if (m_documents.empty())
    {
        std::cout << "警告: 没有文档可处理" << std::endl;
        return;
    }

    // Binary mode so the recorded offsets are exact byte positions.
    std::ofstream pages_file(pages, std::ios::binary);
    if (!pages_file.is_open())
    {
        std::cout << "错误: 无法打开网页库文件 " << pages << std::endl;
        return;
    }

    // Offset store is plain text.
    std::ofstream offsets_file(offsets);
    if (!offsets_file.is_open())
    {
        std::cout << "错误: 无法打开偏移库文件 " << offsets << std::endl;
        pages_file.close();
        return;
    }

    size_t current_offset = 0;
    int success_count = 0;

    std::cout << "开始生成 " << m_documents.size() << " 个文档的网页库和偏移库..." << std::endl;

    for (const auto &doc : m_documents)
    {
        // Strip HTML before storing.
        std::string clean_title = removeHtmlTags(doc.title);
        std::string clean_content = removeHtmlTags(doc.content);

        // Serialize the document in the required record format.
        std::stringstream doc_stream;
        doc_stream << "<doc>\n";
        doc_stream << "<id>" << doc.id << "</id>\n";
        doc_stream << "<link>" << doc.link << "</link>\n";
        doc_stream << "<title>" << clean_title << "</title>\n";
        doc_stream << "<content>" << clean_content << "</content>\n";
        doc_stream << "</doc>\n";

        std::string doc_content = doc_stream.str();
        size_t doc_size = doc_content.size();

        // Append the record to the page store.
        pages_file.write(doc_content.c_str(), doc_size);
        if (pages_file.fail())
        {
            std::cout << "  错误: 写入网页库失败，文档ID=" << doc.id << std::endl;
            continue;
        }

        // BUG FIX: the page bytes are now in pages.dat, so the running offset
        // MUST advance even if writing the offsets record below fails.  The
        // old code skipped the update on failure, desyncing every later
        // offset by doc_size.
        size_t record_offset = current_offset;
        current_offset += doc_size;

        // Offset record: <doc id> <offset> <size>
        offsets_file << doc.id << " " << record_offset << " " << doc_size << "\n";
        if (offsets_file.fail())
        {
            std::cout << "  错误: 写入偏移库失败，文档ID=" << doc.id << std::endl;
            continue;
        }

        // Progress report every 100 fully-written documents.
        if (success_count % 100 == 0)
        {
            std::cout << "  已处理 " << success_count << " 个文档..." << std::endl;
            // Show how many characters the HTML stripping removed (early docs only).
            if (success_count < 300)
            {
                std::cout << "    文档ID=" << doc.id
                          << ", 标题清理: " << (doc.title.length() - clean_title.length()) << " 字符"
                          << ", 内容清理: " << (doc.content.length() - clean_content.length()) << " 字符" << std::endl;
            }
        }

        success_count++;
    }

    pages_file.close();
    offsets_file.close();

    std::cout << "【build_pages_and_offsets】完成！" << std::endl;
    std::cout << "✅ 成功生成 " << success_count << " 个文档" << std::endl;
    std::cout << "✅ 网页库文件: " << pages << " (总大小: " << current_offset << " 字节)" << std::endl;
    std::cout << "✅ 偏移库文件: " << offsets << " (" << success_count << " 条记录)" << std::endl;
}
// Strips HTML tags from `html` and normalizes whitespace.
// Uses a regex for tag removal; falls back to a hand-rolled scanner if the
// regex engine reports an error.
std::string PageProcessor::removeHtmlTags(const std::string &html)
{
    if (html.empty())
    {
        return html;
    }

    std::string result = html;

    try
    {
        // PERF: compiled once and reused — constructing a std::regex on every
        // call is expensive and the pattern never changes.  If construction
        // ever throws, the static is retried on the next call and the catch
        // below handles this one.
        static const std::regex html_pattern("<[^>]*>");
        result = std::regex_replace(result, html_pattern, "");
    }
    catch (const std::regex_error &e)
    {
        // Fallback: simple character-level tag stripper.
        std::cout << "正则表达式错误，使用简单方法去除HTML标签" << std::endl;
        result = removeHtmlTagsSimple(result);
    }

    // Collapse whitespace runs and trim the ends.
    result = normalizeWhitespace(result);

    return result;
}

// Fallback HTML tag stripper (no regex).
// Drops everything from '<' to the matching '>', drops stray '>' characters,
// and truncates at an unterminated '<' — matching the stateful scanner it
// replaces.
std::string PageProcessor::removeHtmlTagsSimple(const std::string &html)
{
    std::string stripped;
    stripped.reserve(html.size());

    std::size_t pos = 0;
    while (pos < html.size())
    {
        const char ch = html[pos];
        if (ch == '<')
        {
            // Skip up to and including the closing '>'.
            const std::size_t close = html.find('>', pos + 1);
            if (close == std::string::npos)
            {
                break; // unterminated tag: discard the remainder
            }
            pos = close + 1;
        }
        else if (ch == '>')
        {
            ++pos; // stray '>' outside any tag is dropped
        }
        else
        {
            stripped += ch;
            ++pos;
        }
    }

    return stripped;
}

// Collapses every run of whitespace into a single space and strips
// leading/trailing whitespace. Returns "" for all-whitespace input.
std::string PageProcessor::normalizeWhitespace(const std::string &text)
{
    // Pass 1: collapse whitespace runs into one ' '.
    std::string collapsed;
    collapsed.reserve(text.size());
    bool prev_was_space = false;

    for (const char ch : text)
    {
        if (std::isspace(static_cast<unsigned char>(ch)))
        {
            if (!prev_was_space)
            {
                collapsed.push_back(' ');
            }
            prev_was_space = true;
        }
        else
        {
            collapsed.push_back(ch);
            prev_was_space = false;
        }
    }

    // Pass 2: trim both ends.
    const char *kWs = " \t\n\r";
    const std::size_t first = collapsed.find_first_not_of(kWs);
    if (first == std::string::npos)
    {
        return "";
    }
    const std::size_t last = collapsed.find_last_not_of(kWs);
    return collapsed.substr(first, last - first + 1);
}

// Optional sanity check for the generated stores: the offsets file must hold
// `expected_count` non-empty lines, and the page store must exist and be
// non-empty.
void PageProcessor::verify_generated_files(const std::string &pages, const std::string &offsets, int expected_count)
{
    std::cout << "\n【文件验证】" << std::endl;

    // Verify the offsets store record count.
    std::ifstream offsets_check(offsets);
    if (!offsets_check.is_open())
    {
        // ROBUSTNESS FIX: an unopenable offsets file used to be silently
        // counted as 0 records; report it explicitly instead.
        std::cout << "❌ 偏移库验证失败: 无法打开文件" << std::endl;
    }
    else
    {
        std::string line;
        int actual_count = 0;
        while (std::getline(offsets_check, line))
        {
            if (!line.empty())
            {
                actual_count++;
            }
        }
        offsets_check.close();

        if (actual_count == expected_count)
        {
            std::cout << "✅ 偏移库验证通过: " << actual_count << " 条记录" << std::endl;
        }
        else
        {
            std::cout << "❌ 偏移库验证失败: 期望 " << expected_count << " 条，实际 " << actual_count << " 条" << std::endl;
        }
    }

    // Verify the page store exists and is non-empty.
    std::ifstream pages_check(pages, std::ios::binary);
    if (pages_check.is_open())
    {
        pages_check.seekg(0, std::ios::end);
        std::streampos end_pos = pages_check.tellg();
        pages_check.close();

        // tellg() returns -1 on failure; the old code converted that to a
        // huge size_t and "passed" the check. Treat it as empty instead.
        if (end_pos > 0)
        {
            std::cout << "✅ 网页库验证通过: 文件大小 " << static_cast<size_t>(end_pos) << " 字节" << std::endl;
        }
        else
        {
            std::cout << "❌ 网页库验证失败: 文件为空" << std::endl;
        }
    }
    else
    {
        std::cout << "❌ 网页库验证失败: 无法打开文件" << std::endl;
    }
}
// ---------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------
// Builds the term -> {doc_id: weight} inverted index with L2-normalized
// TF-IDF weights and writes it to `filename` as one line per term:
//   <term> (<doc_id> <weight>)*
void PageProcessor::build_inverted_index(const std::string &filename)
{
    std::cout << "【build_inverted_index】生成倒排索引开始" << std::endl;

    if (m_documents.empty())
    {
        std::cout << "警告: 没有文档可处理" << std::endl;
        return;
    }

    // Start from a clean index.
    m_invertedIndex.clear();

    int total_documents = m_documents.size();
    std::cout << "处理 " << total_documents << " 个文档..." << std::endl;

    // Step 1: document frequency (DF) per term, term frequency (TF) per (term, doc).
    std::map<std::string, int> document_frequency;
    std::map<std::string, std::map<int, double>> term_frequency;

    int doc_count = 0;
    for (const auto &doc : m_documents)
    {
        doc_count++;
        if (doc_count % 100 == 0)
        {
            std::cout << "  正在处理第 " << doc_count << " 个文档..." << std::endl;
        }

        std::vector<std::string> words;

        // Tokenize the document body with cppjieba.
        m_tokenizer.Cut(doc.content, words, true);

        // Count occurrences of each kept (pure-Chinese, non-stop-word) token.
        std::map<std::string, int> word_count;
        int total_words_in_doc = 0;

        for (const auto &word : words)
        {
            if (isPureChineseWord(word))
            {
                word_count[word]++;
                total_words_in_doc++;
            }
        }

        // Show tokenization samples for the first couple of documents.
        if (doc_count <= 2)
        {
            std::cout << "文档" << doc.id << " 中文分词示例: ";
            int count = 0;
            for (const auto &[word, freq] : word_count)
            {
                if (count < 15)
                {
                    std::cout << word << " ";
                    count++;
                }
            }
            std::cout << std::endl;
        }

        // Record TF and bump DF. Each (word, doc) pair is seen exactly once
        // here (word_count keys are unique per document), so DF can be
        // incremented unconditionally — the old `count(doc.id) == 1` lookup
        // was always true after the assignment and therefore dead.
        for (const auto &[word, count] : word_count)
        {
            double tf = static_cast<double>(count) / total_words_in_doc;
            term_frequency[word][doc.id] = tf;
            document_frequency[word]++;
        }
    }

    std::cout << "分词完成，共提取 " << term_frequency.size() << " 个唯一中文词汇" << std::endl;

    // Very small vocabularies usually mean the filter was too strict.
    if (term_frequency.size() < 100)
    {
        std::cout << "警告: 中文词汇过少，可能过滤太严格" << std::endl;
    }

    // Step 2: raw TF-IDF weights.
    std::cout << "计算TF-IDF权重..." << std::endl;

    std::map<int, std::vector<double>> doc_weights;

    for (const auto &[word, doc_tf_map] : term_frequency)
    {
        double idf = 0.0;
        if (document_frequency[word] > 0)
        {
            idf = log2(static_cast<double>(total_documents) / document_frequency[word]);
        }

        for (const auto &[doc_id, tf] : doc_tf_map)
        {
            double tfidf = tf * idf;
            m_invertedIndex[word][doc_id] = tfidf;
            doc_weights[doc_id].push_back(tfidf);
        }
    }

    // Step 3: L2-normalize each document's weight vector.
    std::cout << "进行归一化处理..." << std::endl;

    // PERF FIX: compute each document's norm once. The old code re-summed
    // the squares for every single posting, i.e. O(postings × terms/doc).
    std::map<int, double> doc_norm;
    for (const auto &[doc_id, weights] : doc_weights)
    {
        double sum_of_squares = 0.0;
        for (double w : weights)
        {
            sum_of_squares += w * w;
        }
        doc_norm[doc_id] = sum_of_squares > 0 ? sqrt(sum_of_squares) : 0.0;
    }

    for (auto &[word, doc_weights_map] : m_invertedIndex)
    {
        for (auto &[doc_id, weight] : doc_weights_map)
        {
            double norm = doc_norm[doc_id];
            if (norm > 0)
            {
                weight /= norm;
            }
        }
    }

    // Step 4: write the index to disk.
    std::cout << "写入倒排索引文件..." << std::endl;

    std::ofstream index_file(filename);
    if (!index_file.is_open())
    {
        std::cout << "错误: 无法打开倒排索引文件 " << filename << std::endl;
        return;
    }

    int word_count = 0;

    // Sort the vocabulary so the output file is ordered.
    std::vector<std::string> sorted_words;
    for (const auto &[word, _] : m_invertedIndex)
    {
        sorted_words.push_back(word);
    }
    std::sort(sorted_words.begin(), sorted_words.end());

    for (const auto &word : sorted_words)
    {
        const auto &doc_weights_map = m_invertedIndex[word];

        index_file << word;
        for (const auto &[doc_id, weight] : doc_weights_map)
        {
            index_file << " " << doc_id << " " << std::fixed << std::setprecision(6) << weight;
        }
        index_file << "\n";

        word_count++;

        // Echo the first 30 terms as a sample.
        if (word_count <= 30)
        {
            std::cout << "  " << word_count << ". " << word;
            int count = 0;
            for (const auto &[doc_id, weight] : doc_weights_map)
            {
                if (count < 2)
                {
                    std::cout << " " << doc_id << "(" << std::fixed << std::setprecision(4) << weight << ")";
                }
                count++;
            }
            if (doc_weights_map.size() > 2)
            {
                std::cout << " ...";
            }
            std::cout << std::endl;
        }
    }

    index_file.close();

    std::cout << "【build_inverted_index】完成！" << std::endl;
    std::cout << "✅ 纯中文关键词数: " << word_count << std::endl;
    std::cout << "✅ 倒排索引文件: " << filename << std::endl;

    // Print one sample entry as a final sanity check.
    if (!m_invertedIndex.empty())
    {
        auto it = m_invertedIndex.begin();
        std::cout << "示例词汇: " << it->first;
        int count = 0;
        for (const auto &[doc_id, weight] : it->second)
        {
            if (count < 3)
            {
                std::cout << " " << doc_id << "(" << weight << ")";
            }
            count++;
        }
        std::cout << std::endl;
    }
}
// Returns true when the code point is a CJK character (or CJK punctuation).
bool PageProcessor::isChineseChar(char32_t c)
{
    return (c >= 0x4E00 && c <= 0x9FFF)      // CJK Unified Ideographs
           || (c >= 0x3400 && c <= 0x4DBF)   // CJK Extension A
           || (c >= 0x20000 && c <= 0x2A6DF) // CJK Extension B
           || (c >= 0x3000 && c <= 0x303F);  // CJK symbols & punctuation
}

// Returns true when `word` is valid UTF-8 consisting of Chinese characters
// (optionally separated by ASCII whitespace) and contains at least one
// Chinese character.
bool PageProcessor::isChineseWord(const std::string &word)
{
    // Byte length < 2 can never hold a Chinese character.
    if (word.empty() || word.length() < 2)
    {
        return false;
    }

    bool has_chinese = false;
    try
    {
        auto it = word.begin();
        auto end = word.end();
        while (it != end)
        {
            char32_t codepoint = utf8::next(it, end);
            if (isChineseChar(codepoint))
            {
                has_chinese = true;
            }
            // Only ASCII whitespace is tolerated besides Chinese characters.
            // BUG FIX: the old code fed the truncated low byte of the code
            // point to std::isspace, so e.g. U+FF20 (low byte 0x20) was
            // silently accepted as a "space".
            else if (codepoint > 0x7F ||
                     !std::isspace(static_cast<unsigned char>(codepoint)))
            {
                return false;
            }
        }
    }
    catch (...)
    {
        // Invalid UTF-8 -> not a Chinese word.
        return false;
    }

    return has_chinese;
}

// // 加强版中文关键词验证
// bool PageProcessor::isValidChineseKeyword(const std::string &word)
// {
//     if (!isChineseWord(word))
//     {
//         return false;
//     }

//     // 过滤停用词
//     if (m_stopWords.find(word) != m_stopWords.end())
//     {
//         return false;
//     }

//     // 过滤过短的词（但已经是中文词，长度至少为2）
//     if (word.length() < 2)
//     {
//         return false;
//     }

//     // 过滤纯标点符号
//     bool has_content = false;
//     for (char c : word)
//     {
//         if (!std::ispunct(static_cast<unsigned char>(c)))
//         {
//             has_content = true;
//             break;
//         }
//     }
//     if (!has_content)
//     {
//         return false;
//     }

//     return true;
// }
// Strict keyword filter: accepts only words made entirely of Han characters
// (no punctuation, digits, Latin letters or symbols) that are not stop words.
bool PageProcessor::isValidChineseKeyword(const std::string &word)
{
    // Byte length < 2 can never hold a Han character (3 bytes in UTF-8).
    if (word.empty() || word.length() < 2)
    {
        return false;
    }

    // Every code point must be a Han character. The old per-character
    // counters (chinese_char_count / total_chars) were dead: any non-Han
    // code point already returned false immediately, so the counts could
    // never differ. The duplicated length check at the end was dead too.
    try
    {
        auto it = word.begin();
        auto end = word.end();
        while (it != end)
        {
            char32_t codepoint = utf8::next(it, end);
            bool is_han =
                (codepoint >= 0x4E00 && codepoint <= 0x9FFF) ||  // CJK Unified
                (codepoint >= 0x3400 && codepoint <= 0x4DBF) ||  // Extension A
                (codepoint >= 0x20000 && codepoint <= 0x2A6DF);  // Extension B
            if (!is_han)
            {
                return false;
            }
        }
    }
    catch (...)
    {
        // Malformed UTF-8 -> not a Chinese keyword.
        return false;
    }

    // Finally, reject stop words.
    return m_stopWords.find(word) == m_stopWords.end();
}

// Byte-level fallback (no UTF-8 library): accepts words made entirely of
// 3-byte UTF-8 sequences with lead byte 0xE4..0xE9 (covers the CJK Unified
// Ideographs block) that are not stop words.
bool PageProcessor::isPureChineseWord(const std::string &word)
{
    if (word.empty() || word.length() < 2)
    {
        return false;
    }

    for (size_t i = 0; i < word.length();)
    {
        unsigned char lead = static_cast<unsigned char>(word[i]);

        // BUG FIX: UTF-8 continuation bytes are 0x80..0xBF. The old check
        // accepted any byte >= 0x80, so garbage like E5 E5 E5 (a lead byte
        // in a continuation position) passed as "Chinese".
        if (lead >= 0xE4 && lead <= 0xE9 &&
            i + 2 < word.length() &&
            (static_cast<unsigned char>(word[i + 1]) & 0xC0) == 0x80 &&
            (static_cast<unsigned char>(word[i + 2]) & 0xC0) == 0x80)
        {
            // Well-formed 3-byte CJK sequence: advance past it.
            i += 3;
        }
        else
        {
            // Anything else disqualifies the word.
            return false;
        }
    }

    // Reject stop words.
    if (m_stopWords.find(word) != m_stopWords.end())
    {
        return false;
    }

    return true;
}

// Generic keyword filter: rejects stop words, pure numbers, words starting
// with a special character, and words that are more than half special
// characters (by byte).
bool PageProcessor::isValidKeyword(const std::string &word)
{
    if (word.empty() || word.length() < 2)
    {
        return false;
    }

    // Reject stop words.
    if (m_stopWords.find(word) != m_stopWords.end())
    {
        return false;
    }

    // Reject pure numbers.
    // UB FIX: passing a raw (possibly negative) char — any UTF-8 byte >= 0x80 —
    // to isdigit is undefined behaviour; cast to unsigned char first.
    if (std::all_of(word.begin(), word.end(), [](char c) {
            return std::isdigit(static_cast<unsigned char>(c)) != 0;
        }))
    {
        return false;
    }

    // Reject words that start with a special character.
    if (!std::isalnum(static_cast<unsigned char>(word[0])))
    {
        return false;
    }

    // Reject words where more than half of the bytes are special characters.
    // (size_t avoids the old signed/unsigned comparison.)
    size_t special_char_count = 0;
    for (char c : word)
    {
        if (!std::isalnum(static_cast<unsigned char>(c)))
        {
            special_char_count++;
        }
    }
    return special_char_count <= word.length() / 2;
}
// ---------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------
void PageProcessor::load_stopwords(const std::string &filename, std::set<std::string> &stopwords)
{
    // std::cout << "加载停用词: " << filename << std::endl;
    // TODO: 实现停用词加载
    ifstream fin(filename);
    if (!fin.is_open())
    {
        cout << "停用词文件打开失败" << filename << endl;
        return;
    }
    string word;
    int count = 0;
    while (getline(fin, word))
    { // 清理单词（去除首尾空格）
        // cout << "getline:   " << word << endl;
        // 正确的清理方式
        size_t start = word.find_first_not_of(" \t\n\r");
        if (start == string::npos)
        {
            continue; // 空行跳过
        }
        size_t end = word.find_last_not_of(" \t\n\r");
        word = word.substr(start, end - start + 1);
        if (!word.empty())
        {
            // cout << "要插入set的数据：" << word << endl;
            stopwords.insert(word);
            count++;
        }
    }
    fin.close();
    cout << "加载停用词: " << filename << " (" << count << " 个)" << endl;
}