# coding:utf-8 ^_^
# @Date   :
# @desc   : 分词处理

import re
import os
import jieba


res = re.compile(r'[\u4e00-\u9fa5]')  # matches a single Chinese (CJK unified ideograph) character


class FenCi(object):
    """Segment a Chinese text file with jieba, filter stop words, count word
    frequencies and write the ranked words to '处理后的分词.txt'.

    Pipeline: main() -> openfile() -> fenci() -> count_sort().
    """

    def __init__(self, filepath, shoppath, cidianPath=None):
        # filepath:   path of the text file to segment
        # shoppath:   path of the stop-word file (whitespace-separated words)
        # cidianPath: optional jieba user dictionary path
        self.filepath = filepath
        self.shoppath = shoppath
        self.cidianPath = cidianPath

    def openfile(self):
        """Read self.filepath and return its Chinese content as one string.

        The Chinese characters of each line are concatenated and each
        non-empty line's segment is terminated with ',' (matching the
        original output format).  Returns '' when the file cannot be read,
        so callers never receive None.
        """
        print('正在读取分词文件...')
        segments = []
        try:
            with open(self.filepath, 'r', encoding='utf-8') as f:
                for line in f:
                    chars = res.findall(line)  # keep Chinese characters only
                    if chars:
                        segments.append(''.join(chars))
        except OSError as e:
            # was: bare return None after printing, which crashed jieba.lcut
            print('ERROR:读取文件失败 【{}】 '.format(e))
            return ''
        # join once instead of the original quadratic `text += ...` loop
        return ''.join(seg + ',' for seg in segments)

    def fenci(self, text):
        """Segment *text* with jieba and return the filtered token list.

        Tokens that appear in the stop-word file or are exactly one
        character long are dropped.  Returns [] on failure so that
        count_sort() can still run.
        """
        try:
            print('正在进行分词操作...')
            # load the optional user dictionary; guard against cidianPath=None
            # (the original probed os.path.exists('None'))
            if self.cidianPath and os.path.exists(str(self.cidianPath)):
                jieba.load_userdict(self.cidianPath)

            tokens = jieba.lcut(text)

            print('正在进行停词筛选...')
            stop_words = set()  # set: O(1) membership test per token
            with open(self.shoppath, 'r', encoding='utf-8') as f:
                for line in f:
                    stop_words.update(line.split())

            # keep tokens that are not stop words and not single characters
            return [t for t in tokens if t not in stop_words and len(t) != 1]
        except Exception as e:
            print('ERROR:文件分词失败 【{}】 '.format(e))
            return []

    def count_sort(self, fen_list):
        """Count word frequencies, sort descending and persist the result.

        Writes the words, most frequent first and comma-separated, to
        '处理后的分词.txt' in the current working directory.
        """
        print('正在进行统计词频...')
        counts = {}
        for word in fen_list:
            counts[word] = counts.get(word, 0) + 1

        print('正在进行分词排序...')
        # local variable replaces the original `global fenci_sort`, which
        # leaked module state and could reuse stale data after an error
        ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)

        try:
            print("正在进行分词数据写入文件...")
            with open('处理后的分词.txt', 'w', encoding='utf-8') as f:
                for word, _count in ranked:
                    f.write(word)  # writelines() was misused for one string
                    f.write(',')
        except OSError as e:
            print('ERROR：处理后的分词写入文件失败 【{}】'.format(e))

    def main(self):
        """Run the whole pipeline; the stop-word file must exist first."""
        if not os.path.exists(self.shoppath):
            print("ERROR：[{}]停词文件不存在，请先创建文件！".format(self.shoppath))
            return None
        text = self.openfile()
        tokens = self.fenci(text=text)
        self.count_sort(tokens)
        print("分词部分执行结束！！")


if __name__ == '__main__':
    # Demo run with hard-coded paths: source text, stop-word list, user dictionary.
    text_path = r"E:\lz\py36\fenci\秦霄贤.txt"
    stop_path = r"E:\lz\py36\fenci\stopwords.txt"
    dict_path = r"E:\lz\py36\fenci\cidian.txt"
    segmenter = FenCi(text_path, stop_path, cidianPath=dict_path)
    segmenter.main()
