#include "../../include/PageLibPreprocessor.h"
#include "../../include/hashdup.h"
#include "../../include/Logger.h"
#include "../../include/MySQLConnector.h"

// Bind the preprocessor to the parsed RSS items; the reference is only read.
PageLibPreprocessor::PageLibPreprocessor(vector<RssiTeam>& RSS) : _RSS(RSS) {}

// Nothing to release by hand: every member cleans itself up (Rule of Zero).
PageLibPreprocessor::~PageLibPreprocessor() = default;

/**
 * Build the preprocessed document list from the RSS items and feed every
 * merged document through jieba-based keyword extraction (Jiebatest).
 *
 * Document ids are 1-based (idx + 1) so they match the ids written to the
 * inverted index and the database later on.
 */
void PageLibPreprocessor::ReadRss(){
    _prepText.reserve(_RSS.size());  // one entry per RSS item: avoid reallocations
    for (size_t idx = 0; idx != _RSS.size(); ++idx) {
        textPrep tp;
        tp.id = idx + 1;
        // Title, description and content are concatenated into one document.
        tp.text = _RSS[idx]._title + _RSS[idx]._description + _RSS[idx]._content;
        _prepText.push_back(tp);
    }
    // const reference: textPrep carries the whole document text, so the old
    // by-value loop copied every document once more just to segment it.
    for (const auto& doc : _prepText) {
        Jiebatest(doc);  // jieba word segmentation / keyword extraction
    }
}

/**
 * Run keyword extraction (simhash/jieba) on one preprocessed document and
 * record every extracted keyword together with the document id in _worldKey.
 *
 * @param t1 preprocessed document: 1-based id plus the merged text.
 */
void PageLibPreprocessor::Jiebatest(const textPrep &t1){
    vector<pair<string, double>> res;  // extracted <keyword, weight> pairs
    // FIX: the old code passed strlen(t1.text.c_str()) — an O(n) rescan of the
    // buffer that also stops at the first embedded '\0'. text.size() yields the
    // intended value in O(1) and is correct for any content.
    Hashdup::getpHashdup()->simhasher.extract(t1.text, res, t1.text.size());
    // const reference: avoid copying each <string, double> pair per iteration.
    for (const auto& kw : res) {
        keyWorld kss;
        kss.id = t1.id;
        kss.world = kw.first;
        _worldKey.push_back(kss);
    }
}
// TF-IDF weight of a single word: tf * log2(totalDocuments / (df + 1)).
// The +1 on documentFrequency guards against division by zero.
double PageLibPreprocessor::calculateIDF(int tf, int totalDocuments, int documentFrequency){
    const double idf = log2(static_cast<double>(totalDocuments) / (documentFrequency + 1));
    return tf * idf;
}

/**
 * Aggregate the flat per-document keyword list (_worldKey) into _keyIndex:
 *   word -> { occurrence count, set of document ids containing the word }.
 */
void PageLibPreprocessor::WorldKey() {
    // const reference: keyWorld holds a string; the old by-value loop copied it
    // on every iteration.
    for (const auto& kw : _worldKey) {
        // operator[] value-initializes a new entry to {0, {}}, so the same two
        // statements handle both "first time seen" and "seen before", replacing
        // the old find-then-insert double lookup.
        auto& entry = _keyIndex[kw.world];
        ++entry.first;               // occurrence count
        entry.second.insert(kw.id);  // document id set (set deduplicates)
    }
}

void PageLibPreprocessor::NormaLization() {
    // 计算每个单词的权重，并放入每一行的集合中
    for (auto& it : _keyIndex) {
        const string& word = it.first;
        double w = calculateIDF(it.second.first, _RSS.size(), it.second.second.size());
        set<pair<int, double>> tempset;
        for (int rowID : it.second.second) {
            tempset.insert({rowID, w});
        }
        vector<pair<int, double>> tempvec(tempset.begin(), tempset.end());
        _inverIntexTable[word] = tempvec;
    }

    // 计算每行的总权重
// 计算每行的总权重
    // 计算每行的总权重
    unordered_map<int, double> rowTotalWeight;
    for (auto& temp : _inverIntexTable) {
        for (auto& idx : temp.second) {
            auto tex = rowTotalWeight.find(idx.first);
            if (tex != rowTotalWeight.end()) {
                tex->second += idx.second; // 使用TF-IDF作为权重
            } else {
                rowTotalWeight[idx.first] = idx.second; // 使用TF-IDF作为权重
            }
        }
    }

    // 归一化操作
for (auto& temp : _inverIntexTable) {
    double totalWeight = 0.0;
    for (auto& it : temp.second) {
        auto zqz = rowTotalWeight.find(it.first);
        if (zqz != rowTotalWeight.end()) {
            totalWeight += zqz->second;
        }
    }
    //重点
    for (auto& it : temp.second) {
        auto zqz = rowTotalWeight.find(it.first);
        if (zqz != rowTotalWeight.end()) {
            it.second /= sqrt(totalWeight);
        }
    }
}
}

void PageLibPreprocessor::Run(const string& filename) {
    ReadRss();
    WorldKey();
    NormaLization();
    // 此时已经得到了_inverIntexTable
    
    MySQLConnector *con = MySQLConnector::getInstance();

    ofstream ofs(filename);
    if (!ofs) {
        LogError("Failed to open %s!", filename.c_str());
        return;
    }
    for (const auto& it : _inverIntexTable) {
        ofs << it.first << " ";
        string s2;
        string s1 = "insert into t_invert (word, article_id, weight) values";
        for (const auto& it2 : it.second) {
            ofs << it2.first << " " << it2.second << " ";
            s1 += "('" + it.first + "', '" + std::to_string(it2.first) + "', '" + std::to_string(it2.second) +  "'),";
        }
        ofs << endl;

        s1.back() = ';';
        con->executeQuery(s1);
    }

    ofs.close();
    LogInfo("Succeed to write xml file.");
}

