#include "../include/PageProcessor.h"
#include "../include/DirectoryScanner.h"
#include "../include/log4cppuse.h"

#include <bitset>
#include <fstream>
#include <tinyxml2.h>
#include <regex>

#include <cppjieba/Jieba.hpp>
#include <utfcpp/utf8.h>
// Construct a PageProcessor: default-construct the tokenizer and the simhash
// hasher, allocate the owned containers, and immediately load the Chinese
// stop-word list (the filename comes from a default argument declared in the
// header, which is not visible here).
// NOTE(review): load_ch_stop_words() calls exit(1) if the stop-word file
// cannot be opened, so constructing this object can abort the process.
PageProcessor::PageProcessor()
: m_tokenizer()
, m_hasher( )
, m_documents(std::make_unique<std::vector<std::pair<uint64_t,Document>>>())
, m_stopWords(std::make_unique<std::set<std::string>>())
, m_invertedIndex(std::make_unique<std::map<std::string, std::map<int, double>>>())
{
    load_ch_stop_words();
}

// Run the whole pipeline over the corpus directory `dir`:
// extract -> deduplicate -> persist page/offset libraries -> build inverted index.
void PageProcessor::process(const std::string& dir)
{
    // 1. Parse every XML file under `dir` into Document records.
    extract_documents(dir);
    // 2. Remove near-duplicate documents (simhash hamming distance).
    deduplicate_documents();
    // 3. Write the page library and its offset index (default paths from header).
    build_pages_and_offsets();
    // 4. Compute TF-IDF weights and write the inverted index.
    build_inverted_index();
}


// 提取文档 — parse every *.xml file under `dir` into Document records.
// For each <item> under <channel>, reads <title>, <link> and the content
// (<content> preferred, <description> as fallback), strips HTML tags and
// collapses whitespace, then stores the document alongside its simhash.
// File-level failures abort the process (exit(1)), matching the error
// policy used elsewhere in this class; item-level failures are skipped.
void PageProcessor::extract_documents(const std::string& dir)
{
    std::vector<std::string> files=DirectoryScanner::getDirectoryScanner().scan(dir);
    for (const auto& file:files) {
        // 跳过非XML文件 — require a true ".xml" suffix, not just a substring,
        // so names like "notes.xml.bak" are no longer picked up.
        static const std::string kExt = ".xml";
        if (file.size() < kExt.size() ||
            file.compare(file.size() - kExt.size(), kExt.size(), kExt) != 0) {
            continue;
        }

        const std::string& path = file;

        // Fail fast if the file cannot even be opened for reading.
        // BUG FIX: the probe stream used to be closed *inside* the per-item
        // loop (repeatedly, and only when the file had items); scoping it
        // here lets RAII close it exactly once, before parsing.
        {
            std::ifstream infile(path);
            if (!infile.is_open()) {
                string log="open "+path+" failed";
                LOG_ERROR(log.c_str());
                exit(1);
            }
        }

        //进行处理文档
        tinyxml2::XMLDocument doc;
        if (doc.LoadFile(path.c_str()) != tinyxml2::XML_SUCCESS) {
            string log="load "+path+" failed";
            LOG_ERROR(log.c_str());
            exit(1);
        }
        tinyxml2::XMLElement* root = doc.RootElement();
        if (!root) {
            string log="root element not found in "+path;
            LOG_ERROR(log.c_str());
            exit(1);
        }
        tinyxml2::XMLElement* channel=root->FirstChildElement("channel");
        if (!channel) {
            string log="channel element not found in "+path;
            LOG_ERROR(log.c_str());
            exit(1);
        }

        for(tinyxml2::XMLElement* item = channel->FirstChildElement("item");
            item != nullptr;
            item = item->NextSiblingElement("item"))
        {
            Document doc_;
            tinyxml2::XMLElement* title=item->FirstChildElement("title");
            // BUG FIX: GetText() returns nullptr for an empty element;
            // assigning nullptr to std::string is undefined behavior.
            if (!title || !title->GetText()) {
                string log="title element not found in "+path;
                LOG_ERROR(log.c_str());
                continue;
            }
            doc_.title=title->GetText();

            tinyxml2::XMLElement* link=item->FirstChildElement("link");
            if (!link || !link->GetText()) {
                string log="link element not found in "+path;
                LOG_ERROR(log.c_str());
                continue;
            }
            doc_.link=link->GetText();

            // 提取内容 - 优先级: content > description
            string content;
            tinyxml2::XMLElement* content_elem = item->FirstChildElement("content");
            if (content_elem && content_elem->GetText()) {
                content = content_elem->GetText();
            } else {
                tinyxml2::XMLElement* desc_elem = item->FirstChildElement("description");
                if (desc_elem && desc_elem->GetText()) {
                    content = desc_elem->GetText();
                }
            }

            if (!content.empty()) {
                // 移除HTML标签
                std::regex html_tags("<[^>]*>");
                content = std::regex_replace(content, html_tags, " ");

                // 移除多余空格
                std::regex multiple_spaces("\\s+");
                content = std::regex_replace(content, multiple_spaces, " ");

                // 去除首尾空格
                content = std::regex_replace(content, std::regex("^\\s+|\\s+$"), "");

                doc_.content = content;
            }

            // 计算文档哈希值 — simhash over the top fifth of the tokens.
            // NOTE(review): topN counts UTF-8 *bytes*/5, not words — confirm
            // this matches m_hasher.make()'s expectation.
            uint64_t hash;
            size_t topN = doc_.content.size() / 5;
            m_hasher.make(doc_.content, topN, hash);
            m_documents->emplace_back(hash, doc_);
        }
    }
    string log = "PageProcessor 提取到 " + to_string(m_documents->size()) + " 个文档";
    LOG_INFO(log.c_str());
}
//去重文档
void PageProcessor::deduplicate_documents()
{
    //直接暴力解法
    for (size_t i = 0; i < m_documents->size(); ++i) {
        for (size_t j = i + 1; j < m_documents->size(); ++j) {
            if (hamming_distance((*m_documents)[i].first, (*m_documents)[j].first) <= 3) {
                // 移除重复文档
                m_documents->erase(m_documents->begin() + j);
                --j;  // 调整索引, 因为向量大小减少了1
            }
           
        }
        m_documents->at(i).second.id = i;
    }
    string log = "PageProcessor 去重后 " + to_string(m_documents->size()) + " 个文档";
    LOG_INFO(log.c_str());
}

//生成网页库和偏移库
void PageProcessor::build_pages_and_offsets(const std::string& pages, const std::string& offsets)
{
    //进行网页库的写入
     std::ofstream output_page_file(pages,std::ios::out|std::ios::trunc);
     std::ofstream output_offset_file(offsets,std::ios::out|std::ios::trunc);
    //网页库的文档内容
    for(auto& doc:*m_documents)
    {
        string page_document ="<doc>\n";
        page_document += "<id>" + to_string(doc.second.id) + "</id>\n";
        page_document += "<link>" + doc.second.link + "</link>\n";
        page_document += "<title>" + doc.second.title + "</title>\n";
        page_document += "<content>" + doc.second.content + "</content>\n";
        page_document += "</doc>\n"; 
        //写入网页库
        output_page_file << page_document;   
        //写入偏移库
        static int offset = 0;
        string offset_document=to_string(doc.second.id) + " " + to_string(offset) +" "+ to_string(page_document.size()) + "\n";
        output_offset_file << offset_document;
        offset += page_document.size();
    }
    
    output_page_file.close();
    output_offset_file.close();
    string log = "PageProcessor 生成 " + to_string(m_documents->size()) + " 个文档的网页库和偏移库";
    LOG_INFO(log.c_str());
}
//生产倒排索引库
void PageProcessor::build_inverted_index(const std::string& filename)
{
    int dict_size = m_documents->size();//文档数量
    std::vector<std::map<std::string,int>>content_dict_count(dict_size);//每个单词文档出现的次数
    std::map<std::string,int>word_to_file_count;//这个单词出现在多少文档
    std::map<int, std::map<std::string, double>> raw_tfidf; // 存储原始TF-IDF值
    for(auto& doc:*m_documents)
    {
        string file_count = doc.second.title + " " + doc.second.content;
        std::regex pattern(R"([!@#$%^&*()_+\-=\[\]{};':"\\|,.<>/?]|\b\d+\b)");
        // 步骤1: 将符号和单个数字替换为空格
        std::string processed_line = std::regex_replace(file_count, pattern, " ");
         // 步骤2: 分词
        std::vector<std::string> words;
        m_tokenizer.Cut(processed_line,words);
        // 步骤3: 过滤和统计
        for(auto& word:words)
        {
            // 跳过空字符串
            if (word.empty()) {
                continue;
            }   
            // 跳过停用词
            if (m_stopWords->find(word) != m_stopWords->end()) {
                continue;
            }
            // 跳过单个字符（可能是残留的符号或数字）
            if (word.length() == 1 && !std::isalpha(word[0])) {
                continue;
            }
            else{
                auto const &w=utf8::iterator(word.begin(),word.begin(),word.end());
                char32_t c=*w;
                if((c>=0x3400 && c<=0x4DBF) || (c>=0x4E00 && c<=0x9FFF))
                {
                    // string log="build_inverted_index: word: "+word+", doc_id: "+to_string(doc.second.id);
                    // LOG_DEBUG(log.c_str());
                    content_dict_count[doc.second.id][word]++;
                    
                }
            }
        }
    }   

    LOG_INFO("build_inverted_index: 统计每个单词出现在多少文档");
    // 统计每个单词出现在多少文档
    for(auto& doc:content_dict_count)
    {
       for(auto &[word,count]:doc)
       {
           if(count > 0)
           {
               word_to_file_count[word]++;
           }
       }
    }
    int doc_id = 0;
     //计算TF_IDF
    for(auto& doc:content_dict_count)
    {
    
        //计算总词数
        int total_words_in_doc = 0;
        for(auto& [word, count] : doc) {
            total_words_in_doc += count;
        }
       for(auto &[word_,count]:doc)
       {
           if(count > 0)
           {
                // 计算TF：词频 / 文档总词数
                double TF = static_cast<double>(count) / total_words_in_doc;
                
                // 计算IDF：log(总文档数 / 包含该词的文档数)
                // 添加除零保护
                if (word_to_file_count[word_] == 0) {
                    continue; // 或者设置为一个很小的值
                }
                double IDF = log10(static_cast<double>(dict_size) / word_to_file_count[word_]);
    
                double TF_IDF = TF * IDF;
    
                 // 保存原始TF-IDF值
                raw_tfidf[doc_id][word_] = TF_IDF;
           }
       }
       doc_id++;
    }


    //进归一化处理
     for(auto& [word_id, tfidf_map] : raw_tfidf) {
        //1.计算平方和
        double sum_squares = 0.0;
        for(auto& [word_, tfidf] : tfidf_map) {
            if(tfidf > sum_squares) {
                sum_squares = tfidf;
            }
            sum_squares += tfidf * tfidf;
        }
        //2.计算模长
        double norm = sqrt(sum_squares);

        //2.归一化并复制到新结构
        if(norm > 0.0)
        {
            for(auto& [word_, tfidf] : tfidf_map) {
                (*m_invertedIndex)[word_][word_id] = tfidf / norm;
            }
        }
    }


    //进行输出到文件
    //进行网页库的写入
    std::ofstream output_file(filename,std::ios::out|std::ios::trunc);
    //网页库的文档内容
    for(auto& [word,tfidf_map]:*m_invertedIndex)
    {
        string inverted_index_document = word + " ";
        for(auto& [word_id,tfidf]:tfidf_map)
        {
            inverted_index_document += to_string(word_id) + " " + to_string(tfidf) + " ";
        }
        inverted_index_document += "\n";
        output_file << inverted_index_document;
        
    }
        
    output_file.close();
    string log="成功生成倒排索引库 "+filename;
    LOG_INFO(log.c_str());

}

//加载中文停用词库 — read `filename` line by line, inserting each line
//verbatim into m_stopWords. Aborts the process (exit(1)) if the file
//cannot be opened, matching the error policy of the other loaders.
void PageProcessor::load_ch_stop_words(const std::string& filename)
{
    std::ifstream in_file(filename);
    if (!in_file.is_open()) {
        string log = "无法打开文件 " + filename;
        LOG_ERROR(log.c_str());
        exit(1);
    }

    // One stop word per line; empty lines are inserted as-is.
    for (string line; std::getline(in_file, line); ) {
        m_stopWords->insert(line);
    }

    string log = "成功加载停用词库 " + filename;
    LOG_INFO(log.c_str());
    in_file.close();
}
    

 int PageProcessor::hamming_distance(uint64_t hash1, uint64_t hash2) {
        uint64_t xor_result = hash1 ^ hash2;
        std::bitset<64> bits(xor_result);
        return bits.count();  // count() 返回1的个数
}



#if 0
// Disabled ad-hoc test driver.
// BUG FIX: the old body called process.process() with no arguments and
// assigned the result to a std::string, but process() takes a directory
// and returns void — this would not have compiled if re-enabled.
int main()
{
    PageProcessor processor;
    processor.process("corpus");  // TODO(review): point at a real XML directory
    return 0;
}

#endif