#define LOGGER_LEVEL LL_WARN 
#include "WebPagePreprocessor.hpp"
#include"simhash/Simhasher.hpp"
using namespace simhash;
using namespace tinyxml2;

namespace se
{

// Equality of two documents.
// BUG FIX: the body was empty, so control flowed off the end of a non-void
// function — undefined behavior.  Two pages are treated as the same document
// when their docIds match, mirroring operator< below which orders by _docId.
bool operator==(const WebPage & lhs, const WebPage & rhs)//are two documents the same?
{
    return lhs._docId == rhs._docId;
}
// Strict weak ordering of documents: ascending by document id.
bool operator<(const WebPage & lhs, const WebPage & rhs)//order documents by docId
{
    const int leftId  = lhs._docId;
    const int rightId = rhs._docId;
    return leftId < rightId;
}
/* WebPage::WebPage(string & doc, Configuration & config, SplitTool & jieba)//构造函数 */
// Constructor: cache the raw (xml-formatted) document text.
// The remaining members (_docId, _docTitle, ...) are filled in later by
// processDoc().
WebPage::WebPage(string & doc)
    : _doc(doc)
{
    // intentionally empty — parsing is deferred to processDoc()
}
int WebPage::getDocId()//获取文档的docId
{
    return _docId;
}
// Accessor: the document title (copy of the cached <title> text).
string WebPage::getTitle()
{
    return this->_docTitle;
}
// Accessor: the document URL (copy of the cached <link> text).
string WebPage::getLink()
{
    return this->_docLink;
}
// Accessor: the document description (copy of the cached <description> text).
// NOTE(review): the name is a typo for "getDescription", but it is part of
// the public interface and callers use it, so it must stay as-is.
string WebPage::getDestription()
{
    return this->_docDescription;
}
// Build a short abstract for this page.
// BUG FIX: the body was empty — flowing off the end of a non-void function
// is undefined behavior.  Until real query-aware summarisation is written,
// fall back to the stored description (or the title when the description is
// empty) so callers always receive a printable string.
// TODO(review): centre/highlight the summary around queryWords.
string WebPage::summary(const vector<string> & queryWords)
{
    (void)queryWords;  // reserved for query-aware summarisation
    if(!_docDescription.empty())
    {
        return _docDescription;
    }
    return _docTitle;
}
// Accessor: the per-document word -> frequency map.
// BUG FIX: the body was empty; a reference-returning function with no return
// statement is undefined behavior.  _wordsMap is the word-frequency member
// (see the commented member list further down this file) — TODO confirm the
// member name against the class header.
map<string,int> & WebPage::getWordsMap()
{
    return _wordsMap;
}
/* void WebPage::processDoc(const string & doc, Configuration & config, SplitTool &)//对格式化文档进行处理 */
void WebPage::processDoc(const string & doc)//对格式化文档进行处理
{
    XMLDocument xmlDoc(doc.c_str());
    XMLElement* pDoc = xmlDoc.RootElement();

    XMLElement* pDocId = pDoc->FirstChildElement( "docid" );
    _docId = atoi(pDocId->GetText());

    XMLElement* pTitle = pDocId->FirstChildElement( "title" );
    _docTitle = pTitle->GetText();

    XMLElement* pLink = pTitle->FirstChildElement( "link" );
    _docLink = pLink->GetText();

    XMLElement* pDescription = pLink->FirstChildElement( "description" );
    _docDescription = pDescription->GetText();

    /* cout << "webDoc.ErrorID" << webDoc.ErrorID() << endl; */

}    
// Compute the document's top-k word set.
// Not implemented yet.  Judging by the signature alone, wordVec should carry
// the document's split words, entries found in stopWordList should be
// skipped, and the k most frequent survivors kept — TODO(review): confirm
// the intended contract against the class header / design notes.
void WebPage::calTopK(vector<string> & wordVec, int k, set<string> & stopWordList)//compute the document's top-k word set
{
    // empty: implementation pending

}
/* vector<string> _webFiles;//网页文件的路径 */
/* string _doc;//整篇文档，包含xml在内 */
/* int _docId;//文档id */
/* string _docTitle;//文档标题 */
/* string _docUrl;//文档URL */
/* string _docContent;//文档内容 */
/* string _docSummary;//文档摘要，需自动生成，不是固定的 */
/* vector<string> _topWords;//词频最高的前20个词 */
/* map<string,int> _wordsMap;//保存每篇文档的所有词语和词频 */
/* const static int TOPK_NUMBER = 20; */

// Constructor.  Not implemented yet: the Configuration argument is currently
// unused.  Presumably it should supply the library/offset file paths that
// are hard-coded in readInfoFromFile()/storeOnDisk() — TODO(review): confirm.
PageLibPreprocessor::PageLibPreprocessor(Configuration & conf)//constructor
{
    // empty: implementation pending

}
// Run the whole preprocessing pipeline.
// Not implemented yet.  Expected to chain readInfoFromFile(),
// cutRedundantPages(), buildInvertIndexTable() and storeOnDisk*() —
// TODO(review): confirm the intended call order.
void PageLibPreprocessor::doProcess()//run preprocessing
{
    // empty: implementation pending

}
// Load the offset library and the page library from disk per the configured
// (currently hard-coded) paths: newoffset.dat holds one "docid offset length"
// line per record; newripepage.dat holds the xml page records themselves.
void PageLibPreprocessor::readInfoFromFile()
{
    // ---- 1. read the offset library ----
    std::ifstream ifs1;
    ifs1.open("../../config/WebLibFile/newoffset.dat",std::ios::in);
    if(!ifs1.good())
    {
        std::cout << "readInfoFromFile: cannot open offset file" << std::endl;
        return;
    }

    std::string line,word;
    int tmpid;

    _offsetLib.clear();
    while(getline(ifs1,line))
    {
        std::istringstream indexNum(line);
        indexNum>>word;
        tmpid=stoi(word);              // token 1: docid
        indexNum>>word;
        _pos.first=stoi(word);         // token 2: byte offset of the record
        // BUG FIX: the third token was never read — the old code computed
        // _pos.second from the SAME word as _pos.first, so every record
        // "length" silently equalled its offset.
        indexNum>>word;
        _pos.second=stoi(word);        // token 3: record length in bytes
        _offsetLib[tmpid]=_pos;
    }

    // ---- 2. read every page record out of the page library ----
    // BUG FIX: the old code re-opened newoffset.dat here; the page text
    // lives in newripepage.dat (see storeOnDisk()).
    std::ifstream ifs2;
    ifs2.open("../../config/WebLibFile/newripepage.dat");
    if(!ifs2.good())
    {
        std::cout << "readInfoFromFile: cannot open page file" << std::endl;
        return;
    }

    _pageLib.clear();

    std::string tmpLines;
    for(std::unordered_map<int,std::pair<int,int>>::iterator iter=_offsetLib.begin();
        iter!=_offsetLib.end();
        ++iter)  // BUG FIX: the old while-loop never advanced iter -> infinite loop
    {
        long length=iter->second.second;          // record size in bytes
        ifs2.seekg(iter->second.first,ifs2.beg);  // jump to the record start

        // BUG FIX: read into a string sized exactly to the record.  The old
        // new[]/delete[] buffer was not NUL-terminated, so "tmpLines = buff"
        // read past the end of the allocation (and leaked on early exit).
        tmpLines.resize(length);
        ifs2.read(&tmpLines[0],length);

        WebPage web(tmpLines);
        web.processDoc(tmpLines);
        _pageLib.push_back(web);
    }
}
void PageLibPreprocessor::cutRedundantPages()//对冗余的网页进行去重
{
    //哈希去重
    
    Simhasher simhasher("dict/jieba.dict.utf8", "dict/hmm_model.utf8", "dict/idf.utf8", "dict/stop_words.utf8");
    std::vector<uint64_t>tmpHash;//临时存储哈希值
    std::string inQueryCont;//查询内容
    size_t topN=5;//高频词
    uint64_t hashRes=0;//哈希值
    int result;//去重结果
    /*
     * @计算哈希值
    */
    
    for(auto&pageindex:_pageLib)
    {

        //std::vector<WebPage>::iterator iter= _pageLib.begin();//网页库的容器迭代器
        inQueryCont=pageindex.getDestription()+pageindex.getTitle();
        simhasher.extract(inQueryCont,_res,topN);//计算哈希值
        simhasher.make(inQueryCont,topN,hashRes);//返回哈希值
        tmpHash.push_back(hashRes);
    }

    /*
     * @网页去重
     */
    
        
    for(size_t i=0;i!=tmpHash.size();i++)
    {
        std::vector<uint64_t>::iterator jtor= tmpHash.begin();//临时哈希值容器迭代器
        std::vector<WebPage>::iterator iter= _pageLib.begin();//网页库的容器迭代器
       for(size_t j=0;j!=tmpHash.size();j++)
      // while(jtor!=tmpHash.end())
        {   
            result=Simhasher::isEqual(tmpHash[i],tmpHash[j]);//h
            if(1==result&&i!=j)
            {
                _pageLib.erase(iter);
                tmpHash.erase(jtor);
            }
            iter++;
            jtor++;
        }
    }
}

// Build the inverted index table.
// Not implemented yet.  Per the commented member list below, the result is
// presumably stored in _invertIndexTable (word -> [(docid, weight)]) —
// TODO(review): confirm against the class header.
void PageLibPreprocessor::buildInvertIndexTable()//build the inverted index table
{
    // empty: implementation pending

}
// Write the (renumbered) page library back to newripepage.dat and rebuild
// the matching offset library in newoffset.dat ("docid offset length" lines).
void PageLibPreprocessor::storeOnDisk()
{// somewhat bloated; consider passing the paths in as parameters
    ofstream ofs1("../../config/WebLibFile/newripepage.dat");
    if(!ofs1.good())
    {
        cout << "ofstream not good" << endl;
        return;
    }
    ofstream ofs2("../../config/WebLibFile/newoffset.dat");
    if(!ofs2.good())
    {
        cout << "ofstream not good" << endl;
        return;
    }

    // BUG FIX: rebuild the offset library from scratch.  The old code used
    // insert(), which is a no-op for keys that already exist, so offsets
    // loaded earlier by readInfoFromFile() silently kept their STALE values
    // after the pages had been deduplicated and renumbered.
    _offsetLib.clear();

    int v_cnt = 0;
    for(auto & page :_pageLib)
    {
        ++v_cnt;                              // pages are renumbered 1..N
        int position1 = ofs1.tellp();         // start offset of this record
        ofs1 << "<doc> " 
            << " <docid> " << v_cnt << " </docid>"
            << " <title> " << page.getTitle() << " </title> "
            << " <link> " << page.getLink()  << " </link> "
            << " <description> " << page.getDestription() << "</description>" 
            << " </doc>" 
            << endl;
        int position2 = ofs1.tellp();
        int length = position2 - position1;   // record size in bytes
        _offsetLib[v_cnt] = std::make_pair(position1, length);
    }

    // one "docid offset length" line per record
    for(auto & offset :_offsetLib)
    {
        ofs2 << offset.first << " " << offset.second.first << " " << offset.second.second << endl;
    }

    ofs1.close();
    ofs2.close();
}

// Write the de-duplicated page library and a freshly built offset library to
// the "afterHash*" files on disk.
void PageLibPreprocessor::storeOnDisk_2()
{
/***************************************************************/// offset library after dedup
    ofstream ofsHash1("../../config/WebLibFile/afterHashripepage.dat");
    if(!ofsHash1.good())
    {
        cout << "ofstream ofsHash1 not good" << endl;
        return;
    }

    ofstream ofsHash2("../../config/WebLibFile/afterHashOffset.dat");
    if(!ofsHash2.good())
    {
        cout << "ofstream ofsHash2 not good" << endl;
        return;
    }

    unordered_map<int, pair<int, int>> _offsetLib2;// local offset library for the dedup'd pages
/**************************************************************///
    int v_cnt = 0;
    for(auto & page :_pageLib)
    {
        ++v_cnt;                                    // pages are renumbered 1..N
        int position_hash1 = ofsHash1.tellp();      // start offset of this record
        
        ofsHash1 << "<doc> " 
            << " <docid> " << v_cnt << " </docid>"
            << " <title> " << page.getTitle() << " </title> "
            << " <link> " << page.getLink()  << " </link> "
            << " <description> " << page.getDestription() << "</description>" 
            << " </doc>" 
            << endl;
        int position_hash2 = ofsHash1.tellp();      // end offset of this record
        // BUG FIX: the subtraction was reversed (start - end), producing a
        // non-positive "length" for every record.  Length = end - start.
        int length_hash = position_hash2 - position_hash1;
        _offsetLib2.insert(std::make_pair(v_cnt, std::make_pair(position_hash1, length_hash)));
    }

    // one "docid offset length" line per record
    for(auto & offset2 :_offsetLib2)
    {
        ofsHash2 << offset2.first << " " << offset2.second.first << " " << offset2.second.second << endl;
    }
    ofsHash1.close();
    ofsHash2.close();
}

/* WordSegmentation _jieba;//分词对象 */
/* vector<WebPage> _pageLib;//网页库的容器对象 */
/* unordered_map<int, pair<int, int>> _offsetLib;//网页偏移库对象 */
/* unordered_map<string, vector<pair<int, double>>> _invertIndexTable;//倒排索引表对象 */

}//end of namespace se


