from collections import Counter

import jieba
import pandas as pd

from base_handle import BaseHandle # 引入工具类

baseHandle = BaseHandle() #实例化

'''
1.1 Jieba 分词及词频统计
'''

def split_words_jieba(text):
    '''Tokenize *text* with jieba and filter the result.

    Loads a domain-specific user dictionary before cutting, then removes
    stopwords and single-character tokens.

    :param text: raw corpus text to segment.
    :return: list of kept tokens (length > 1, not in the stopword list).
    '''
    diy_dict = (baseHandle.get_file_abspath('物流词汇大全.txt'))  # custom logistics-domain dictionary
    jieba.load_userdict(diy_dict)  # register the custom dictionary with jieba
    comment_cut = jieba.lcut(text)  # segmentation result

    # Load the stopword list. The file contains literal double quotes; a plain
    # pd.read_csv would treat them as quoting and drop rows, hence quoting=3
    # (csv.QUOTE_NONE).
    stop_dic = (baseHandle.get_file_abspath('百度+哈工大+机器智能实验室停用词库.txt'))
    stopwords = pd.read_csv(stop_dic, names=['w'], header=None, sep='\t', encoding='utf-8', quoting=3)

    # Build the stopword set ONCE. The original rebuilt list(stopwords.w) on
    # every loop iteration, making the filter O(tokens * stopwords); a set
    # gives O(1) membership tests.
    stopword_set = set(stopwords.w)
    datas = [word for word in comment_cut
             if word not in stopword_set and len(word) > 1]
    return datas


def count_fre_words(datas):
    '''Count token frequencies and save high-frequency words to Excel.

    Tokens occurring only once are discarded; the rest are written, sorted by
    descending frequency, to 高频词统计.xlsx in the working directory.

    :param datas: list of tokens (output of split_words_jieba).
    :return: the filtered DataFrame with columns ['keyword', 'fre']
             (also written to disk; return value added for convenience,
             the original returned None and callers ignore it).
    '''
    # Counter replaces the hand-rolled dict + manual sort; most_common()
    # already yields (word, count) pairs sorted by descending count, with the
    # same stable ordering as the original dict-insertion + stable sort.
    items = Counter(datas).most_common()
    dt = pd.DataFrame(items, columns=['keyword', 'fre'])  # list of pairs -> DataFrame
    dt1 = dt[(dt.fre != 1)]  # drop keywords that occur only once
    dt1.to_excel('高频词统计.xlsx')  # persist as a spreadsheet
    return dt1


if __name__ == "__main__":
    # 1.引入语料excel数据
    text = baseHandle.read_col_merge_file(baseHandle.get_file_abspath('语料库_京东_5000条评论.xlsx'))
    # 2.jieba分词
    datas = split_words_jieba(text)
    # 3.对分词结果进行统计，最后生成【高频词统计.xlsx】文件
    count_fre_words(datas)
