#include "CandidatePages.h"

/// Construct the recommender by loading all on-disk libraries.
/// @param invertedIndexFile  word -> (docId, weight)* postings, one word per line
/// @param pageOffsetFile     docId -> (start, size) offsets into the page library
/// @param pageLibFile        raw XML page library, read later via pread()
/// @param stopWordFile       whitespace-separated stop words
/// Exits the process on any file-open failure (same policy as setPageRecommender).
CandidatePages::CandidatePages(const string& invertedIndexFile, 
                               const string& pageOffsetFile, 
                               const string& pageLibFile, 
                               const string& stopWordFile)
: m_pagesFd(-1)  // setPageRecommender opens the page library itself
{
    // Delegate to setPageRecommender so the loading/parsing logic lives in
    // exactly one place (the constructor previously duplicated it verbatim,
    // which risks the two copies drifting apart).
    setPageRecommender(invertedIndexFile, pageOffsetFile, pageLibFile, stopWordFile);
}


/// (Re)load all data libraries. Safe to call more than once: the previously
/// opened page-library descriptor is closed and all in-memory tables are
/// cleared first, so repeated calls replace (not accumulate) state.
/// Exits the process on any file-open failure.
void CandidatePages::setPageRecommender(const string& invertedIndexFile, const string& pageOffsetFile, 
                                        const string& pageLibFile, const string& stopWordFile)
{
    // Release the fd from any earlier load so we do not leak it,
    // and drop stale data from a previous call.
    if (m_pagesFd != -1) {
        close(m_pagesFd);
    }
    m_invertedIndex.clear();
    m_pageOffset.clear();
    m_stopWords.clear();

    // Open the raw page library (kept open; readPageLib uses pread on it).
    m_pagesFd = open(pageLibFile.c_str(), O_RDONLY);
    if (-1 == m_pagesFd) {
        std::cerr << pageLibFile << ": File open failed.\n";
        exit(1);
    }

    // Open the inverted index file.
    ifstream ifsIdx{invertedIndexFile};
    if (!ifsIdx.is_open()) {
        std::cerr << invertedIndexFile << ": File open failed.\n";
        exit(1);
    }

    // Open the page offset file.
    ifstream ifsPageOff{pageOffsetFile};
    if (!ifsPageOff.is_open()) {
        std::cerr << pageOffsetFile << ": File open failed.\n";
        exit(1);
    }

    // Open the stop word file.
    ifstream ifsStop{stopWordFile};
    if (!ifsStop.is_open()) {
        std::cerr << stopWordFile << ": File open failed.\n";
        exit(1);
    }

    // Read the inverted index: each line is "word (docId weight)*".
    string line;
    while (getline(ifsIdx, line)) {
        istringstream iss(line);
        string word;
        iss >> word;
        if (word.empty()) continue;

        int docId;
        double weight;
        while (iss >> docId >> weight) {
            m_invertedIndex[word][docId] = weight;
        }
    }

    // Read the page offsets: each line is "id start size".
    while (getline(ifsPageOff, line)) {
        istringstream iss(line);
        int id;
        size_t start, size;
        iss >> id >> start >> size;
        
        m_pageOffset[id] = {start, size};
    }

    // Read the stop words (whitespace-separated).
    string stopWord;
    while (ifsStop >> stopWord) {
        m_stopWords.insert(stopWord);
    }

    #if DEBUG
    std::cout << "invertedIndexFIle_size: " << m_invertedIndex.size() << std::endl
              << "pageOffsetFile_size: " << m_pageOffset.size() << std::endl
              << "stopWordFile_size: " << m_stopWords.size() << std::endl;
    #endif
}


/// Return a JSON array of pages related to @p input, ordered by descending
/// cosine similarity. Only pages containing EVERY query keyword are returned;
/// when no page qualifies the result is an empty JSON array.
shared_ptr<Json> CandidatePages::getCandidatePages(const string& input)
{
    // Build the query's keyword -> normalized TF-IDF weight map.
    // These weights form the "base vector" used for similarity scoring.
    shared_ptr<unordered_map<string, double>> keywordWeight = makeKeywordToWeight(input);

    // Gather the posting map (docId -> weight) of each keyword that
    // actually appears in the inverted index.
    vector<unordered_map<int, double>*> postings;
    for (const auto& entry : *keywordWeight) {
        auto pos = m_invertedIndex.find(entry.first);
        if (pos != m_invertedIndex.end()) {
            postings.push_back(&pos->second);
        }
    }

    // Intersect the posting lists: keep only pages containing all keywords.
    shared_ptr<set<int>> relatedPages = unordered_mapIntersection(postings);
    if (!relatedPages || relatedPages->empty()) {
        // No page contains every keyword — return an empty array.
        #if DEBUG
        std::cout << "[system] No related pages." << std::endl;
        #endif
        return std::make_shared<Json>(Json::array());
    }

    // Score each candidate page against the base vector; the result is
    // already sorted by similarity, highest first.
    vector<std::pair<int, double>> ranked = calculateSimilarity(keywordWeight, relatedPages);
    #if DEBUG
    for (auto& [docId, similarity] : ranked) {
        std::cout << "docId: " << docId << ", similarity: " << similarity << std::endl;
    }
    #endif

    // Materialize the ranked pages from the on-disk page library.
    auto result = std::make_shared<Json>(Json::array());
    for (const auto& entry : ranked) {
        auto& [docStart, docSize] = m_pageOffset[entry.first];
        result->push_back(readPageLib(docStart, docSize));
    }
    return result;
}


// 工具函数
// 将输入内容当成一个文档，对其分词（略过停用词）并进行TF-IDF计算，构建 关键词-->权重 的映射
// Utility.
// Treats the input as one document: tokenizes it (skipping stop words) and
// builds a keyword -> normalized TF-IDF weight map (the query's base vector).
// Returns an empty map when every token is a stop word / input is empty.
shared_ptr<unordered_map<string, double>> CandidatePages::makeKeywordToWeight(const string& input)
{
    // keyword --> normalized TF-IDF weight
    shared_ptr<unordered_map<string, double>> keywordWeight = std::make_shared<unordered_map<string, double>>();

    // 1) Tokenize: strip non-Chinese characters, cut into words,
    //    drop stop words, and count term occurrences.
    string cleanedInput = ChineseExtractor::extract(input);

    vector<string> tokens;
    m_tokenizer.Cut(cleanedInput, tokens);

    int allWordsCount = 0;
    for (const auto& token : tokens) {
        if (m_stopWords.find(token) != m_stopWords.end()) {
            continue;
        }
        (*keywordWeight)[token] += 1;
        ++allWordsCount;
    }
    // Nothing survived stop-word filtering: avoid 0/0 below.
    if (allWordsCount == 0) {
        return keywordWeight;
    }

    // 2) TF: occurrence count -> term frequency.
    for (auto& [word, weight] : *keywordWeight) {
        weight /= allWordsCount;
    }
    // TF * IDF with add-one smoothing.
    // FIX: the original divided two size_t values, so the ratio was truncated
    // by integer division before log(). Use find() rather than operator[] so
    // query words absent from the index do not insert empty postings.
    for (auto& [word, weight] : *keywordWeight) {
        size_t docFreq = 0;
        auto it = m_invertedIndex.find(word);
        if (it != m_invertedIndex.end()) {
            docFreq = it->second.size();
        }
        double idf = std::log((1.0 + m_pageOffset.size()) / (1.0 + docFreq));
        weight *= idf;
    }
    // Normalize the TF-IDF vector to unit length (guard against a zero norm,
    // which the original turned into inf/NaN weights).
    double norm = 0.0;
    for (const auto& [word, weight] : *keywordWeight) {
        norm += weight * weight;
    }
    norm = std::sqrt(norm);
    if (norm > 0.0) {
        for (auto& [word, weight] : *keywordWeight) {
            weight /= norm;
            #if DEBUG
            std::cout << "[word-weight] " << word << " - " << weight << std::endl; 
            #endif
        }
    }

    // 3) Return the keyword -> weight map.
    return keywordWeight;
}


// 工具函数
// 计算基准向量与相关网页之间的余弦相似度
// Utility.
// Computes the cosine similarity between the query's base vector and each
// related page, returning (docId, similarity) pairs sorted by descending
// similarity. Read-only with respect to m_invertedIndex.
vector<std::pair<int, double>> CandidatePages::calculateSimilarity(shared_ptr<unordered_map<string, double>> keywordWeight, 
                                                                   shared_ptr<set<int>> relatedPages)
{
    // The base vector's norm does not depend on the document — hoist it out
    // of the per-document loop (the original recomputed it every iteration).
    double norm1 = 0.0;
    for (const auto& [word, baseWeight] : *keywordWeight) {
        norm1 += baseWeight * baseWeight;
    }
    norm1 = std::sqrt(norm1);

    vector<std::pair<int, double>> docId_similarity;
    docId_similarity.reserve(relatedPages->size());
    for (int docId : *relatedPages) {

        double dotProduct = 0.0;
        double norm2 = 0.0;

        for (const auto& [word, baseWeight] : *keywordWeight) {
            // FIX: the original used m_invertedIndex[word][docId], whose
            // operator[] INSERTS empty/zero entries for any (word, docId)
            // missing from the index — a mutation side effect that pollutes
            // the index. Look up with find() and treat absence as weight 0.
            double weightInDoc = 0.0;
            auto wordIt = m_invertedIndex.find(word);
            if (wordIt != m_invertedIndex.end()) {
                auto docIt = wordIt->second.find(docId);
                if (docIt != wordIt->second.end()) {
                    weightInDoc = docIt->second;
                }
            }

            dotProduct += baseWeight * weightInDoc;
            norm2 += weightInDoc * weightInDoc;
        }
        norm2 = std::sqrt(norm2);
        
        if (norm1 == 0 || norm2 == 0) { // avoid division by zero
            docId_similarity.push_back({docId, 0});
        } else {
            docId_similarity.push_back({docId, dotProduct / (norm1 * norm2)});
        }
    }

    // Sort docId_similarity by similarity, highest first.
    std::sort(docId_similarity.begin(), docId_similarity.end(), 
             [](const std::pair<int, double>& a, const std::pair<int, double>& b) {
                return a.second > b.second;
             });
     
    return docId_similarity;
}


// 从网页库中读入内容
Json CandidatePages::readPageLib(size_t docStart, size_t docSize) 
{
    using namespace tinyxml2;

    // 从磁盘的网页库文件读入指定内容
    
    char* buffer = new char[docSize + 1]{};
    // 多线程安全的读取
    pread(m_pagesFd, buffer, docSize, docStart);
    
    // 建立文档解析对象
    XMLDocument doc;

    // 解析 XML 字符串
    XMLError result = doc.Parse(buffer);
    if (result != XML_SUCCESS) {
        std::cout << "XML 解析失败，错误代码: " << result << std::endl;
        delete[] buffer;
        return Json::object();
    }
    delete[] buffer;
    
    // 获取根元素
    XMLElement* root = doc.RootElement();
    if (!root) {
        std::cout << "无法找到XML根元素" << std::endl;
        return Json::object();
    }
    
    // 解析各个字段
    Json retDoc;
    XMLElement* idElement = root->FirstChildElement("id");
    if (idElement && idElement->GetText()) {
        retDoc["id"] = idElement->GetText();
    } else {
        retDoc["id"] = "None";
    }
    
    XMLElement* titleElement = root->FirstChildElement("title");
    if (titleElement && titleElement->GetText()) {
        retDoc["title"] = titleElement->GetText(); 
    } else {
        retDoc["title"] = "None";
    }

    XMLElement* linkElement = root->FirstChildElement("link");
    if (linkElement && linkElement->GetText()) {
        retDoc["link"] = linkElement->GetText();
    } else {
        retDoc["link"] = "None";
    }
    
    XMLElement* contentElement = root->FirstChildElement("content");
    if (contentElement && contentElement->GetText()) {
        string content{contentElement->GetText()};
        int maxLength = 50;  // 50个汉字

        const char* start = content.c_str();
        const char* end = content.c_str() + content.size();
        const char* it = start;
        while (it != end && maxLength) {
            utf8::next(it, end);    // 将it移动到下一个utf8字符所在的位置
            --maxLength;
        }

        string abs{start, it};
        retDoc["abstract"] = abs;
    } else {
        retDoc["abstract"] = "None";
    }
    
    return retDoc;  
}
