// KeywordProcessor.h -- performs lexical analysis (tokenization) over the corpora
#pragma once

#include <string>
#include <set>
#include <map>
#include <memory>
#include <vector>

#include <cppjieba/Jieba.hpp>
#include <utfcpp/utf8.h>

using std::string;
using std::set;
using std::map;
using std::vector;


// Default on-disk locations for corpora, dictionaries, indexes and stop-word
// lists, resolved relative to the working directory of the built binary.
//
// Declared `inline` (C++17) so that all translation units including this
// header share ONE definition of each string instead of each TU constructing
// its own internal-linkage copy at static-initialization time.
inline const std::string index_cn_lib="../../../data/index_cn.lib";   // serialized Chinese index
inline const std::string dict_cn_lib="../../../data/dict_cn.lib";     // serialized Chinese dictionary
inline const std::string index_en_lib="../../../data/index_en.lib";   // serialized English index
inline const std::string dict_en_lib="../../../data/dict_en.lib";     // serialized English dictionary
inline const std::string ch_dir="../../../data/CN";                   // Chinese corpus directory
inline const std::string en_dir="../../../data/EN";                   // English corpus directory
inline const std::string en_dict_outfile="../../../data/en_dict.dat"; // English dictionary output
inline const std::string cn_dict_outfile="../../../data/cn_dict.dat"; // Chinese dictionary output
inline const std::string cn_index_outfile="../../../data/cn_index.dat"; // Chinese index output
inline const std::string en_index_outfile="../../../data/en_index.dat"; // English index output
inline const std::string stop_cn_file="../../../data/stopwords/cn_stopwords.txt"; // Chinese stop words
inline const std::string stop_en_file="../../../data/stopwords/en_stopwords.txt"; // English stop words

// Builds word-frequency dictionaries and inverted indexes from Chinese and
// English corpora, filtering out stop words. Tokenization of Chinese text is
// delegated to cppjieba; English text is split on whitespace and lowercased.
// (Method bodies live in the corresponding .cpp — behavior notes below are
// inferred from the declarations and should be confirmed against it.)
class KeyWordProcessor {
public:
    KeyWordProcessor();
    // Runs the full pipeline over both corpora and writes the four output
    // files. All parameters default to the file-scope path constants above.
    // chDir:  Chinese corpus directory
    // enDir:  English corpus directory
    void process(const  std::string& chDir=ch_dir, 
                 const  std::string& enDir=en_dir, 
                 const  std::string& cnDictOutfile=cn_dict_outfile,
                 const  std::string& cnIndexOutfile=cn_index_outfile,
                 const  std::string& enDictOutfile=en_dict_outfile,
                 const  std::string& enIndexOutfile=en_index_outfile);
private:
    // Builds the Chinese word-frequency dictionary from corpus `dir`,
    // writing the result to `outfile`.
    void create_cn_dict(const std::string& dir, const std::string& outfile);
    // Builds the Chinese inverted index (presumably writes to `index`;
    // confirm against the .cpp — the parameter name suggests an output path).
    void build_cn_index(const std::string& index);
  

    // Builds the English word-frequency dictionary from corpus `dir`,
    // writing the result to `outfile`.
    void create_en_dict(const std::string& dir, const std::string& outfile);
    // Builds the English inverted index (see note on build_cn_index).
    void build_en_index(const std::string& index);
  
    
    // Load the stop-word lists used to filter tokens; defaults point at the
    // bundled stop-word files.
    void load_en_stop_words(const std::string& filename=stop_en_file);
    void load_ch_stop_words(const std::string& filename=stop_cn_file);

    // Splits `text` on whitespace into tokens (used for English text).
    std::vector<std::string> split_by_space(const std::string& text);
    // Returns a lowercased copy of `str` (normalizes English tokens).
    std::string to_lower_case(const std::string& str);
private:
    cppjieba::Jieba m_tokenizer;                 // Chinese word segmenter

    std::unique_ptr<std::set<std::string>> m_enStopWords;  // English stop words
    std::unique_ptr<std::set<std::string>> m_chStopWords;  // Chinese stop words
    // word -> frequency maps (shared_ptr suggests they are handed to other
    // components; verify ownership against the .cpp)
    std::shared_ptr<std::map<std::string,int>> m_enWords;
    std::shared_ptr<std::map<std::string,int>> m_chWords;
    // character/word -> list of ints (likely document or line ids — TODO
    // confirm the id semantics in the implementation)
    std::shared_ptr<std::map<std::string,std::vector<int>>> m_en_characters_index;
    std::shared_ptr<std::map<std::string,std::vector<int>>> m_ch_characters_index; 
};