#pragma once

#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <set>
#include <unordered_set>
#include <map>
#include <unordered_map>
#include <regex>
#include <algorithm>
#include <cmath>

#include <cppjieba/Jieba.hpp>
#include <simhash/Simhasher.hpp>
#include <tinyxml2.h>

#include "DirectoryScanner.h"

using std::string;
using std::vector;
using std::set;
using std::map;
using std::ifstream;
using std::ofstream;

// Output directory for the generated library files; ends with '/' so file
// names can be appended directly.
// `inline` (C++17) gives one shared definition across all translation units,
// instead of the per-TU copy that header-scope `static` would create.
inline const std::string libOutDir = "./processedCorpus/";

/// Builds a search-engine corpus from raw web pages: extracts documents,
/// de-duplicates them (simhash), then writes the page library, the page
/// offset library, and the inverted index. Method bodies live in the .cpp.
class PageProcessor
{
public:
    /// @param stopWordsFile path of the stop-word list file
    ///        (presumably loaded into m_stopWords — defined in the .cpp).
    /// `explicit` prevents accidental implicit conversion from a
    /// filename string to a PageProcessor.
    explicit PageProcessor(const string& stopWordsFile);

    /// Runs the full processing pipeline over the corpus under `dir`.
    void process(const string& dir);

private:
    // Parses the raw page files under `dir` (fills m_documents).
    void extract_documents(const string& dir);
    // Drops duplicate documents, keeping m_documents unique.
    void deduplicate_documents();
    // Writes the page library and the per-document offset library.
    void build_pages_and_offsets(const string& pageFile, const string& offsetFile);
    // Builds m_invertedIndex and writes it to `filename`.
    void build_inverted_index(const string& filename);

private:
    // One extracted web page/document.
    struct Document
    {
        int id;          // document id
        string link;     // source URL of the page
        string title;    // page title
        string content;  // page body text
    };

private:
    cppjieba::Jieba m_tokenizer;       // tokenizer (Chinese word segmentation)
    simhash::Simhasher m_hasher;       // computes document hashes for de-duplication
    set<string> m_stopWords;           // set rather than vector for fast lookup
    vector<Document> m_documents;      // de-duplicated documents; source for the page and offset libraries
    map<string, map<int, double>> m_invertedIndex;   // inverted index: word -> (doc id -> weight)
};