#include "../include/WebpageSearch.h"
#include "../include/log4cppuse.h"



#include <fstream>
#include <regex>
#include <sstream>
#include <algorithm>
#include <unordered_map>
#include <vector>
#include <map>
#include <string>
#include <memory>
#include <algorithm>
#include <cmath>
#include <unordered_map>
#include <iomanip>

#include <nlohmann/json.hpp>

using std::vector;
using std::map;
using std::set;
using std::unique_ptr;
using namespace nlohmann;


// Construct the search engine: allocate all lookup tables and load the
// stop-word list, document offset table and inverted index from disk.
WebpageSearch::WebpageSearch()
: m_sum(0)
, m_documents_size(0)
, m_BaseWords(std::make_unique<std::map<std::string, int>>())
, m_chStopWords(std::make_unique<std::set<std::string>>())
, m_BaseVector(std::make_unique<std::map<std::string, double>>())
, m_offsets(std::make_unique<std::map<int, std::pair<int, int>>>())
, m_pageLib(std::make_unique<std::map<std::string, std::vector<std::pair<int, double>>>>())
{
    // Loaded once here; this class is used as a singleton (getWebpageSearch).
    load_ch_stop_words();
    load_offsets_file();
    load_Inverted_file();
}

// All members are RAII types (unique_ptr, containers); the defaulted
// destructor is sufficient.
WebpageSearch::~WebpageSearch() = default;


// Meyers singleton: the instance is constructed on first use; since C++11
// the initialization of a function-local static is thread-safe.
WebpageSearch &WebpageSearch::getWebpageSearch()
{
    static WebpageSearch instance{};
    return instance;
}

// Execute one search: tokenize the query, build its TF-IDF vector, rank the
// indexed pages by cosine similarity and return the results serialized as JSON.
string WebpageSearch::search(const string &content,const string &page_file_)
{
    wordSegmentation(content);

    string results = makeBaseVector(content, page_file_);

    // Reset ALL per-query state. The original cleared the word map and the
    // vector but not m_sum, so the term count accumulated across searches
    // and skewed the TF computation of every later query.
    m_BaseWords->clear();
    m_BaseVector->clear();
    m_sum = 0;
    return results;
}
    

#if 1
 string WebpageSearch::makeBaseVector(const string &content,const string &filename)//Vectorize the query and render the ranked results as JSON
{
    // Cache key: concatenation of all query terms.
    string SearchContent;
    for (const auto &entry : *m_BaseWords)
    {
        SearchContent += entry.first;
    }

    // Check the LRU cache first; recompute the ranking only on a miss.
    auto data = PageLRUCache::getInstance().get(SearchContent);
    if (data.empty())
    {
        std::string log = "现在进行 TF-IDF算法处理,当前文档的词语总数为：" + std::to_string(m_sum);
        LOG_INFO(log.c_str());
        for (const auto &word : *m_BaseWords)
        {
            // TF = occurrences of the term / total terms in the query.
            double tf = (double)word.second / (double)m_sum;
            // DF = number of documents whose posting list contains the term.
            double DF = (*m_pageLib)[word.first].size();
            if (DF == 0)
            {
                continue;
            }
            double idf = log2((double)m_documents_size / (double)DF);
            (*m_BaseVector)[word.first] = tf * idf;
        }

        // L2-normalize the query vector.
        double norm = 0.0;
        for (const auto &w : *m_BaseVector)
        {
            norm += w.second * w.second;
        }
        norm = sqrt(norm);
        // Guard: a zero norm (no query term matched any document) would
        // otherwise turn every weight into NaN via division by zero.
        if (norm > 0.0)
        {
            for (auto &w : *m_BaseVector)
            {
                w.second /= norm;
            }
        }

        log = "现在进行 TF-IDF算法处理,当前文档的词语正在归一化";
        LOG_INFO(log.c_str());
        // Terms that matched no document still get an explicit zero weight.
        for (const auto &word : *m_BaseWords)
        {
            if (m_BaseVector->find(word.first) == m_BaseVector->end())
            {
                (*m_BaseVector)[word.first] = 0.0;
            }
        }

        if (m_BaseVector->empty())
        {
            return "";
        }
        log = "现在进行 TF-IDF算法处理,当前文档的词语向量";
        LOG_INFO(log.c_str());
        // Rank the documents and cache the result for identical later queries.
        data = rankDocumentsByCosineSimilarity();
        PageLRUCache::getInstance().put(SearchContent, data);
    }

    // Open the page library once for all results; the original reopened
    // the file on every iteration and never checked that the open succeeded.
    std::ifstream infile(filename);
    if (!infile.is_open())
    {
        std::string log = filename + ":无法打开文件进行读取";
        LOG_ERROR(log.c_str());
        return "";
    }

    vector<SearchResult> results;
    results.reserve(data.size());
    // const& binding avoids copying each (id, similarity, keywords) tuple.
    for (const auto &[id_, cosine_similarity, keyword_infos] : data)
    {
        SearchResult result;
        result.id = id_;
        int offset = (*m_offsets)[id_].first;
        int filesize = (*m_offsets)[id_].second;
        std::string pagecontent(filesize, '\0');
        infile.clear();            // reset any eof/fail bit from a prior read
        infile.seekg(offset);
        infile.read(&pagecontent[0], filesize);

        // Title: plain string search for the <title>...</title> tags.
        size_t title_start = pagecontent.find("<title>");
        size_t title_end = pagecontent.find("</title>");
        if (title_start != std::string::npos && title_end != std::string::npos && title_end > title_start) {
            result.title = pagecontent.substr(title_start + 7, title_end - title_start - 7);
        } else {
            result.title = "No Title Found";
            string log = "未找到标题";
            LOG_INFO(log.c_str());
        }

        // Link: <link>...</link>.
        size_t link_start = pagecontent.find("<link>");
        size_t link_end = pagecontent.find("</link>");
        if (link_start != std::string::npos && link_end != std::string::npos && link_end > link_start) {
            result.link = pagecontent.substr(link_start + 6, link_end - link_start - 6);
        } else {
            result.link = "No Link Found";
            string log = "未找到链接";
            LOG_INFO(log.c_str());
        }

        // Abstract: <content>...</content>, truncated to 50 characters on a
        // UTF-8 boundary. (The original also copied it through a no-op
        // temporary `tag` variable, removed here.)
        size_t abstract_start = pagecontent.find("<content>");
        size_t abstract_end = pagecontent.find("</content>");
        if (abstract_start != std::string::npos && abstract_end != std::string::npos && abstract_end > abstract_start) {
            result.abstract = pagecontent.substr(abstract_start + 9, abstract_end - abstract_start - 9);
            if (result.abstract.length() > 50)
            {
                result.abstract = safeUtf8Substr(result.abstract, 50);
            }
        } else {
            result.abstract = "No Abstract Found";
            string log = "未找到摘要";
            LOG_INFO(log.c_str());
        }

        results.push_back(std::move(result));
    }

    // Serialize the ranked results to a JSON array.
    json results_json = json::array();
    for (const auto &result : results)
    {
        json result_json;
        result_json["id"] = result.id;
        result_json["title"] = result.title;
        result_json["link"] = result.link;
        result_json["abstract"] = result.abstract;
        results_json.push_back(result_json);
    }

    return results_json.dump();
}
#endif
// Tokenize `content` and count the CJK terms into m_BaseWords / m_sum.
void WebpageSearch::wordSegmentation(const string &content)
{
    string log = "输入的原始字符串为：" + content;
    LOG_INFO(log.c_str());
    // Regex matching punctuation/symbols and standalone digit runs.
    std::regex pattern(R"([!@#$%^&*()_+\-=\[\]{};':"\\|,.<>/?]|\b\d+\b)");

    // Step 1: replace symbols and standalone numbers with spaces.
    std::string processed_line = std::regex_replace(content, pattern, " ");

    // Step 2: segment into words. (An unused stringstream was removed here.)
    std::vector<std::string> words;
    m_tokenizer.Cut(processed_line, words);

    // Step 3: filter stop words and keep only CJK terms.
    for (auto &word : words)
    {
        // Skip empty tokens.
        if (word.empty()) {
            continue;
        }

        // Skip stop words.
        if (m_chStopWords->find(word) != m_chStopWords->end()) {
            continue;
        }

        // Skip single non-alpha characters (leftover symbols/digits).
        // Cast to unsigned char: feeding a negative char (e.g. a stray
        // UTF-8 continuation byte) to std::isalpha is undefined behavior.
        if (word.length() == 1 && !std::isalpha(static_cast<unsigned char>(word[0]))) {
            continue;
        }

        // Count the word only if its first code point is a CJK ideograph
        // (CJK Extension A U+3400–U+4DBF, or Unified U+4E00–U+9FFF).
        auto const &w = utf8::iterator(word.begin(), word.begin(), word.end());
        char32_t c = *w;
        if ((c >= 0x3400 && c <= 0x4DBF) || (c >= 0x4E00 && c <= 0x9FFF))
        {
            (*m_BaseWords)[word]++;
            m_sum++;
        }
    }

    log = "分词后的结果为：" + std::to_string(words.size());
    LOG_INFO(log.c_str());
}

// Load the Chinese stop-word list (one word per line) into m_chStopWords.
void WebpageSearch::load_ch_stop_words(const std::string& filename)
{
    // Input stream — the original named this "outfile", which was misleading.
    std::ifstream infile(filename);
    if (!infile.is_open()) {
        std::string log = filename + ":无法打开文件进行读取";
        LOG_ERROR(log.c_str());
        return;
    }

    std::string word;
    while (std::getline(infile, word))
    {
        // Move each line into the set to avoid a copy per stop word.
        m_chStopWords->insert(std::move(word));
    }
    std::string log = "加载停用词文件成功，停用词数量：" + std::to_string(m_chStopWords->size());
    LOG_INFO(log.c_str());
}



// Load the offsets file into m_offsets. Each line has the form:
//   doc_id offset filesize
// Missing fields default to 0, matching the original's empty-token handling.
void WebpageSearch::load_offsets_file(const std::string &filename)
{
    std::ifstream infile(filename);
    if (!infile.is_open()) {
        std::string log = filename + ":无法打开文件进行读取";
        LOG_ERROR(log.c_str());
        return;
    }

    string line;
    while (std::getline(infile, line))
    {
        std::stringstream iss(line);
        // Extract directly into ints: a missing or malformed field leaves its
        // value at 0 (a failed extraction zeroes the target since C++11),
        // whereas the original's std::stoi would throw on a non-numeric token
        // and abort the whole load.
        int doc_id = 0;
        int offset = 0;
        int filesize = 0;
        iss >> doc_id >> offset >> filesize;
        (*m_offsets)[doc_id] = std::make_pair(offset, filesize);
        m_documents_size++;
    }
    std::string log = "加载偏移量文件成功，文档数量：" + std::to_string(m_documents_size);
    LOG_INFO(log.c_str());
}

// Load the inverted index into m_pageLib. Each line has the form:
//   word doc_id1 tfidf1 doc_id2 tfidf2 ...
void WebpageSearch::load_Inverted_file(const std::string &filename)
{
    std::ifstream infile(filename);
    if (!infile.is_open()) {
        std::string log = filename + ":无法打开文件进行读取";
        LOG_ERROR(log.c_str());
        return;
    }

    string line;
    while (std::getline(infile, line))
    {
        std::stringstream iss(line);
        string first_word;
        iss >> first_word;
        if (first_word.empty())
        {
            continue; // blank line
        }
        // Extract (doc_id, tfidf) pairs directly as numbers. This stops at
        // the first malformed pair instead of throwing from std::stoi/stod,
        // and the map entry is only created once a valid pair is seen
        // (matching the original). Unused locals were removed.
        int doc_id = 0;
        double tfidf = 0.0;
        while (iss >> doc_id >> tfidf)
        {
            (*m_pageLib)[first_word].emplace_back(doc_id, tfidf);
        }
    }
    string log = "加载倒排索引文件成功，索引数为：" + std::to_string(m_pageLib->size());
    LOG_INFO(log.c_str());
}







// Rank documents against the current query vector and return tuples of
// (doc id, cosine similarity, contributing keywords), sorted by descending
// similarity. The document norm is restricted to the query terms.
vector<tuple<int, double, vector<KeywordInfo>>> WebpageSearch::rankDocumentsByCosineSimilarity()
{
    string log="正在进行余弦相似度计算";
    LOG_INFO(log.c_str());

    // Single pass over query terms: accumulate, per matching document, the
    // squared norm (restricted to query terms), the dot product with the
    // query, and the keywords that contributed. The original walked
    // m_BaseVector x m_pageLib twice (once for norms, once for dot products).
    unordered_map<int, double> docNorms;
    unordered_map<int, double> dotProducts;
    unordered_map<int, vector<KeywordInfo>> docKeywords;

    for (const auto& [queryWord, queryWeight] : *m_BaseVector) {
        auto it = m_pageLib->find(queryWord);
        if (it == m_pageLib->end()) {
            continue;
        }
        for (const auto& [docId, docWeight] : it->second) {
            docNorms[docId] += docWeight * docWeight;
            dotProducts[docId] += queryWeight * docWeight;
            docKeywords[docId].push_back({queryWord, queryWeight, docWeight});
        }
    }

    // Query vector norm.
    double queryNorm = 0.0;
    for (const auto& [word, weight] : *m_BaseVector) {
        queryNorm += weight * weight;
    }
    queryNorm = sqrt(queryNorm);

    if (queryNorm == 0) return {};

    // Cosine similarity = dot / (|query| * |doc restricted to query terms|).
    vector<tuple<int, double, vector<KeywordInfo>>> results;
    results.reserve(dotProducts.size());
    for (const auto& [docId, dotProduct] : dotProducts) {
        double docNorm = sqrt(docNorms[docId]);
        if (docNorm > 0) {
            double similarity = dotProduct / (queryNorm * docNorm);
            // Move the keyword list out of the scratch map instead of copying.
            results.push_back({docId, similarity, std::move(docKeywords[docId])});
        }
    }

    // Sort by similarity, highest first.
    sort(results.begin(), results.end(),
         [](const auto& a, const auto& b) {
             return get<1>(a) > get<1>(b);
         });

    log="余弦相似度计算完成，匹配文档数量："+std::to_string(results.size());
    LOG_INFO(log.c_str());
    return results;
}

// Truncate a UTF-8 string to at most max_chars code points without splitting
// a multi-byte sequence. On a decode error, falls back to a plain byte
// substring (assuming at most 3 bytes per Chinese character).
std::string WebpageSearch::safeUtf8Substr(const std::string& str, size_t max_chars)
{
    if (str.empty() || max_chars == 0) {
        return "";
    }

    try {
        std::string truncated;
        auto cursor = str.begin();
        const auto last = str.end();

        for (size_t taken = 0; cursor != last && taken < max_chars; ++taken) {
            const auto char_begin = cursor;
            utf8::next(cursor, last);  // advance by one UTF-8 code point

            // Defensive: stop if decoding made no forward progress.
            if (cursor <= char_begin) {
                break;
            }
            truncated.append(char_begin, cursor);
        }

        return truncated;

    } catch (const utf8::exception& e) {
        // Decoding failed — log and fall back to a simple byte-based cut.
        LOG_ERROR(("UTF-8解码错误，使用简单截取: " + std::string(e.what())).c_str());
        return str.substr(0, std::min(str.size(), max_chars * 3));
    }
}
    
 #if 0
// Disabled interactive test driver: reads a query from stdin in a loop and
// prints the JSON search results.
// NOTE(review): if re-enabled this will not compile as-is — search() takes a
// second page-file argument here, and <iostream> plus the std names
// (cin/cout) are not visibly brought into scope in this file.
int main()
{
    while(true)
    {
        std::cout<<"请输入要查询的单词:"<<std::endl;
        string content;
        cin>>content;
        WebpageSearch &webpageSearch=WebpageSearch::getWebpageSearch();
        string results=webpageSearch.search(content);
        std::cout<<results<<std::endl;
        content.clear();
    }
}
 #endif