#include "page_processor.hpp"
#include "directory_scanner.hpp"

#include <cctype>
#include <cmath>
#include <fstream>
#include <ios>
#include <sstream>
#include <string>

// Construct the processor: wire the cppjieba tokenizer and the simhash
// hasher to their dictionary files, start document ids at 0, and load the
// Chinese and English stop-word lists used to filter tokens when indexing.
PageProcessor::PageProcessor()
    : m_tokenizer("resources/dictionaries/jieba.dict.utf8",
                  "resources/dictionaries/hmm_model.utf8",
                  "resources/dictionaries/user.dict.utf8",
                  "resources/dictionaries/idf.utf8",
                  "resources/dictionaries/stop_words.utf8")
    , m_hasher("resources/dictionaries/jieba.dict.utf8",
                  "resources/dictionaries/hmm_model.utf8",
                  "resources/dictionaries/idf.utf8",
                  "resources/dictionaries/stop_words.utf8") 
    , m_currentDocId(0) {
    load_stop_words("resources/corpus/stopwords/cn_stopwords.txt");
    load_stop_words("resources/corpus/stopwords/en_stopwords.txt");
}


// Read an entire file into a string. Returns an empty string (and logs to
// stderr) when the file cannot be opened.
std::string PageProcessor::read_file(const std::string& filename) {
    std::ifstream input(filename);
    if (!input.is_open()) {
        std::cerr << "无法打开文件：" << filename << std::endl;
        return "";
    }

    // Drain the whole stream buffer in one shot.
    std::ostringstream contents;
    contents << input.rdbuf();
    return contents.str();
}

// Load one stop word per line from `filename` into m_stopWords.
// Each line is trimmed of surrounding whitespace (spaces, tabs, \r, \n —
// the \r matters for CRLF files read on Unix); blank lines are skipped.
// A missing file is logged and ignored.
void PageProcessor::load_stop_words(const std::string& filename) {
    std::ifstream input(filename);
    if (!input.is_open()) {
        std::cerr << "无法打开停用词文件：" << filename << std::endl;
        return;
    }

    const char* const kWhitespace = " \t\r\n";
    std::string line;
    while (std::getline(input, line)) {
        const size_t first = line.find_first_not_of(kWhitespace);
        if (first == std::string::npos) {
            continue;  // line is empty or all whitespace
        }
        const size_t last = line.find_last_not_of(kWhitespace);
        m_stopWords.insert(line.substr(first, last - first + 1));
    }
}


void PageProcessor::parse_xml(const std::string& filename) {
    tinyxml2::XMLDocument doc;
    tinyxml2::XMLError error = doc.LoadFile(filename.c_str());
    if (error != tinyxml2::XML_SUCCESS) {
        std::cerr << "无法解析XML文件：" << filename << " 错误码："  << error << std::endl;
        return;
    }

    tinyxml2::XMLElement* root = doc.RootElement();
    if (!root) {
        std::cerr << "XML文件没有根元素：" << filename << std::endl;
        return;
    }
    
    // 1. 先从根元素(rss)下找到 channel 元素
    tinyxml2::XMLElement* channel = root->FirstChildElement("channel");
    if (!channel) {
        std::cerr << "在 " << filename << " 中未找到 <channel> 元素" << std::endl;
        return;
    }
    
    // 2. 再从 channel 元素下查找第一个 item 元素
    tinyxml2::XMLElement* item = channel->FirstChildElement("item");

    while (item) {
        Document docItem;
        docItem.id = ++m_currentDocId;

        // 提取link
        tinyxml2::XMLElement* linkElem = item->FirstChildElement("link");
        if (linkElem && linkElem->GetText()) {
            docItem.link = linkElem->GetText();
        }

        // 提取title
        tinyxml2::XMLElement* titleElem = item->FirstChildElement("title");
        if (titleElem && titleElem->GetText()) {
            docItem.title = titleElem->GetText();
        }

        // 提取content或description
        tinyxml2::XMLElement* contentElem = item->FirstChildElement("content");
        if (contentElem && contentElem->GetText()) {
            docItem.content = contentElem->GetText();
        }
        else {
            tinyxml2::XMLElement* descElem = item->FirstChildElement("description");
            if (descElem && descElem->GetText()) {
                docItem.content = descElem->GetText();
            }
            else {
                item = item->NextSiblingElement("item");
                continue;
            }
        }

        // 计算simhash值
        size_t topN = 10;
        m_hasher.make(docItem.content, topN, docItem.simhash);

        m_documents.push_back(docItem);
        
        // 移动到下一个兄弟 item 元素
        item = item->NextSiblingElement("item");
    }
}


// Scan `dir` recursively (via DirectoryScanner) and parse every file with
// an ".xml" extension into m_documents.
void PageProcessor::extract_documents(const std::string& dir) {
    std::vector<std::string> files = DirectoryScanner::scan(dir);

    for (const std::string& file : files) {
        // Only accept a genuine ".xml" extension. Guard against filenames
        // with no dot at all: find_last_of would return npos, and npos + 1
        // wraps to 0, so the old substr test matched a file literally
        // named "xml".
        const size_t dot = file.find_last_of('.');
        if (dot != std::string::npos &&
            file.compare(dot + 1, std::string::npos, "xml") == 0) {
            parse_xml(file);
        }
    }
}

// Number of bit positions at which the two 64-bit simhash values differ.
int PageProcessor::hamming_distance(uint64_t hash1, uint64_t hash2) {
    uint64_t diff = hash1 ^ hash2;
    int bits = 0;
    // Kernighan's trick: each iteration clears the lowest set bit,
    // so the loop runs once per differing bit.
    while (diff != 0) {
        diff &= diff - 1;
        ++bits;
    }
    return bits;
}

// Drop near-duplicate documents in place: a document is kept only if its
// simhash is at Hamming distance >= 3 from every previously kept document.
// Earlier documents win ties. O(n^2) pairwise comparison over cheap 64-bit
// fingerprints.
void PageProcessor::deduplicate_documents() {
    std::vector<Document> kept;

    for (const Document& candidate : m_documents) {
        bool nearDuplicate = false;
        for (const Document& existing : kept) {
            if (hamming_distance(candidate.simhash, existing.simhash) < 3) {
                nearDuplicate = true;
                break;
            }
        }
        if (!nearDuplicate) {
            kept.push_back(candidate);
        }
    }

    m_documents.swap(kept);
}


// Remove everything between '<' and '>' from `html` and collapse each run of
// whitespace (including newlines) into a single space. Naive single-pass
// scanner: it does not understand quoted attributes, comments, or CDATA, so
// a '>' inside an attribute value would end the tag early.
std::string PageProcessor::strip_html_tags(const std::string& html) {
    std::string text;
    bool in_tag = false;

    for (char c : html) {
        if (c == '<') {
            in_tag = true;
        } else if (c == '>') {
            in_tag = false;
        } else if (!in_tag) {
            // Cast to unsigned char before isspace: passing a negative char
            // (any non-ASCII UTF-8 byte — e.g. every byte of Chinese text
            // this code handles) is undefined behavior.
            if (std::isspace(static_cast<unsigned char>(c))) {
                if (!text.empty() &&
                    !std::isspace(static_cast<unsigned char>(text.back()))) {
                    text += ' ';
                }
            } else {
                text += c;
            }
        }
    }

    return text;
}


// Serialize every document into the page library file (`pages`, opened in
// binary so tellp() offsets are exact byte positions) and write one line
// per document into the offsets file: "id offset size". Each page entry is
// a pseudo-XML record: <doc> wrapping id/link/title/content, with the
// content stripped of HTML tags, followed by a blank separator line.
void PageProcessor::build_pages_and_offsets(const std::string& pages, const std::string& offsets) {
    std::ofstream pagesFile(pages, std::ios::binary);
    std::ofstream offsetsFile(offsets);

    if (!pagesFile.is_open() || !offsetsFile.is_open()) {
        std::cerr << "无法打开网页库或网页偏移库文件" << std::endl;
        return;
    }

    for (const Document& doc : m_documents) {
        // Byte position where this record starts in the page library.
        const std::streampos startPos = pagesFile.tellp();

        const std::string body = strip_html_tags(doc.content);

        // Build the record in memory so we can measure its exact size.
        std::ostringstream entry;
        entry << "<doc>" << "\n"
              << "  <id>" << doc.id << "</id>" << "\n"
              << "  <link>" << doc.link << "</link>" << "\n"
              << "  <title>" << doc.title << "</title>" << "\n"
              << "  <content>" << body << "</content>" << "\n"
              << "</doc>" << "\n\n";

        const std::string record = entry.str();
        pagesFile.write(record.data(), record.size());

        // Offset index line: id, starting offset, record length in bytes.
        offsetsFile << doc.id << " " << startPos << " " << record.size() << "\n";
    }
}



// Build the inverted index over m_documents and write it to `filename`.
// Three passes:
//   1. tokenize each document (cppjieba mixed mode), drop stop words, and
//      collect per-document term frequencies (TF) plus global document
//      frequencies (DF);
//   2. compute TF-IDF weights per document and L2-normalize them;
//   3. dump one line per term: "word docCount docId weight docId weight ...".
// Populates m_invertedIndex as a side effect.
void PageProcessor::build_inverted_index(const std::string& filename) {
    if (m_documents.empty()) {
        std::cerr << "无文档可处理，无法构建倒排索引" << std::endl;
        return;
    }

    // Pass 1: compute TF (term frequency) per document and DF (document
    // frequency) per term.
    std::vector<std::map<std::string, int>> docTFList;  // one TF map per document, parallel to m_documents
    std::map<std::string, int> dfMap;   // term -> number of documents containing it
    int totalDocs = m_documents.size();    // N in the IDF formula
    for (const Document& doc : m_documents) {
        std::map<std::string, int> tfMap;

        // Tokenize the document body.
        std::vector<std::string> words;
        m_tokenizer.Cut(doc.content, words);  // cppjieba default (mix-mode) segmentation

        // Filter stop words and count term occurrences.
        for (const std::string& word : words) {
            if (m_stopWords.find(word) == m_stopWords.end() && !word.empty()) {
                ++tfMap[word];
            }
        }

        docTFList.push_back(tfMap);

        // Update DF: each term counts once per document regardless of TF.
        for (const auto& [word, _] : tfMap) {
            ++dfMap[word];
        }
    }

    // Pass 2: compute TF-IDF per (document, term) and L2-normalize within
    // each document so that cosine similarity reduces to a dot product.
    for (size_t docIdx = 0; docIdx < m_documents.size(); ++docIdx) {
        const Document& doc = m_documents[docIdx];
        const std::map<std::string, int>& tfMap = docTFList[docIdx];

        if (tfMap.empty()) continue;  // document had no indexable terms

        // Accumulate the sum of squared TF-IDF weights for normalization.
        double weightSum = 0.0;
        std::map<std::string, double> tfidfMap;

        for (const auto& [word, tf] : tfMap) {
            // IDF = log2(N / (DF + 1)). The +1 dampens the weight; DF is
            // always >= 1 here since the term came from some document, so
            // the division itself could not be zero. NOTE(review): when
            // DF + 1 > N this makes IDF negative — confirm that is intended.
            int df = dfMap[word];
            double idf = log2(static_cast<double>(totalDocs) / (df + 1));

            // TF-IDF weight for this term in this document.
            double tfidf = tf * idf;
            tfidfMap[word] = tfidf;
            weightSum += tfidf * tfidf;  // accumulate squared weights
        }

        // Normalize each weight by the L2 norm of the document's vector.
        double normFactor = sqrt(weightSum);
        if (normFactor < 1e-9) continue;  // guard against division by ~zero

        for (const auto& [word, tfidf] : tfidfMap) {
            double normalizedWeight = tfidf / normFactor;
            m_invertedIndex[word][doc.id] = normalizedWeight;
        }
    }

    // Pass 3: persist the index, one term per line.
    std::ofstream indexFile(filename);
    if (!indexFile.is_open()) {
        std::cerr << "无法打开倒排索引文件：" << filename << std::endl;
        return;
    }

    for (const auto& [word, docWeights] : m_invertedIndex) {
        indexFile << word << " ";
        indexFile << docWeights.size();  // number of documents containing the term

        // Followed by (docId, normalized weight) pairs.
        for (const auto& [docId, weight] : docWeights) {
            indexFile << " " << docId << " " << weight; 
        }
        indexFile << "\n";
    }
}


// Full pipeline entry point: extract documents from the XML corpus under
// `dir`, deduplicate them by simhash, then write the page library, offset
// library, and inverted index under resources/data/. Progress is logged to
// stdout after each stage.
void PageProcessor::process(const std::string& dir) {
    // 1. Extract documents from the XML corpus.
    extract_documents(dir);
    std::cout << "提取文档完成，共找到" << m_documents.size() << "篇原始文档" << std::endl;

    // 2. Remove near-duplicate documents.
    deduplicate_documents();
    std::cout << "文档去重完成，剩余" << m_documents.size() << "篇唯一文档" << std::endl;

    // 3. Generate the page library and page-offset library.
    build_pages_and_offsets("resources/data/pages.lib", "resources/data/offsets.lib");
    std::cout << "网页库和网页偏移库生成完成" << std::endl;

    // 4. Generate the inverted index library.
    build_inverted_index("resources/data/inverted_index.lib");
    std::cout << "倒排索引库生成完成，共包含" << m_invertedIndex.size() << "个关键词" << std::endl;
}
