"""
此类为中文预处理的工具类
主要为中文处理提供分词、删除停用词、过滤敏感词、词性标注、词频统计方法（之后会陆续修改或增加）
"""
import codecs
import os
import time
import jieba
import jieba.posseg as pseg
from collections import Counter


class Word_Pretreatment:
    """Chinese text preprocessing helper.

    Provides word segmentation (jieba), stop-word removal, sensitive-word
    filtering, part-of-speech tagging and word-frequency statistics.
    """

    def __init__(self, data_dict, stop_word_dir, sensitive_word_dir):
        """
        Chinese preprocessing.

        :param data_dict: raw data dict; must contain "title" and "details" keys
        :param stop_word_dir: directory holding stop-word lexicon files
        :param sensitive_word_dir: directory holding sensitive-word lexicon files
        """
        self.data_dict = data_dict
        self.stop_word_dir = stop_word_dir  # stop-word lexicon directory
        self.sensitive_word_dir = sensitive_word_dir  # sensitive-word lexicon directory
        self.result_list = self.__get_splitter()

    @staticmethod
    def _load_words_from_dir(word_dir):
        """
        Load one word per line from every file inside *word_dir*.

        Shared by ``del_stop_words`` and ``words_filter`` (they previously
        duplicated this loop). Returns an empty list when *word_dir* is not
        a directory, preserving the original silent fallback.

        :param word_dir: directory containing plain-text lexicon files
        :return: list of stripped lines from all files in the directory
        """
        words = []
        if os.path.isdir(word_dir):
            for filename in os.listdir(word_dir):
                # "with" guarantees the handle is closed (the original leaked
                # codecs.open handles); explicit UTF-8 avoids depending on the
                # platform default encoding. os.path.join replaces the
                # Windows-only '\\' concatenation.
                with open(os.path.join(word_dir, filename), encoding='utf-8') as file:
                    for line in file:
                        words.append(line.strip())
        return words

    def __get_splitter(self):
        """
        Segment the article (title + details) with jieba.

        Fix: the original joined the tokens with "/" and split again, which
        corrupts the token list whenever jieba emits a token that itself
        contains "/" (e.g. punctuation). The generator is now materialized
        directly.

        :return: list of segmented tokens
        """
        title = self.data_dict["title"]      # article title
        details = self.data_dict["details"]  # article body
        t1 = time.time()
        results = list(jieba.cut(title + details))
        t2 = time.time()
        print("分词完成，耗时：" + str(t2 - t1) + "秒。")  # progress feedback
        return results

    def del_stop_words(self):
        """
        Remove stop words from the segmentation result.

        Loads every file in ``self.stop_word_dir`` (one stop word per line)
        and drops matching tokens from ``self.result_list`` in place.

        :return: self (fluent interface, unchanged from the original)
        """
        # A set makes the per-token membership test O(1) instead of an
        # O(n) list scan for every token.
        stopwords = set(self._load_words_from_dir(self.stop_word_dir))
        self.result_list = [word for word in self.result_list
                            if word not in stopwords]
        return self

    def words_filter(self):
        """
        Check the token list against the sensitive-word lexicon.

        :return: dict with keys
                 ``exist`` -- 1 if any sensitive word was found, else 2
                 (historical convention callers rely on);
                 ``word_in_sen`` -- matched tokens, duplicates kept,
                 in document order
        """
        sensitive_vocabulary = set(self._load_words_from_dir(self.sensitive_word_dir))
        word_in_sen = [word for word in self.result_list
                       if word in sensitive_vocabulary]
        print("该篇文章中存在的敏感词有：", word_in_sen)
        exist = 1 if word_in_sen else 2
        return {"exist": exist, "word_in_sen": word_in_sen}

    def pos_tagging(self):
        """
        POS-tag the current result with jieba.posseg.

        NOTE(review): as in the original, this joins the tokens back into a
        single string, re-segments it, and stores one concatenated
        "word/flag" string into ``self.result_list`` — changing its type
        from list to str. Leftover debug prints were removed.

        :return: self
        """
        text = ''.join(self.result_list)
        # str.join avoids the quadratic string "+=" accumulation.
        pos_tag_words = "".join(str(pair.word) + "/" + str(pair.flag)
                                for pair in pseg.cut(text))
        print(pos_tag_words)
        self.result_list = pos_tag_words
        return self

    def word_frequency_count(self):
        """
        Count how often each token occurs in the current result list.

        :return: collections.Counter mapping token -> occurrence count
        """
        # Counter consumes an iterable directly; no manual increment loop.
        cnt = Counter(self.result_list)
        print(cnt.most_common())
        return cnt
