"""
去除停用词
"""

import jieba
from jieba import analyse
import pandas as pd

sentence = '郭靖和哀牢山三十六剑。'
path = '../../notebook/stop-words.txt'


"""自定义停用词列表，通过列表推导式排除停用词"""
# '和' and '。' carry no semantic content here, so treat them as stop words.
# NOTE: use a set, not the string '和。' — `word not in '和。'` performs
# SUBSTRING matching on a str, which would silently drop any multi-character
# token that happens to be a substring; set membership tests whole tokens
# and is O(1) per lookup.
stop_words = {'和', '。'}
words = jieba.lcut(sentence)
# Without stop-word removal
print('/'.join(words)) # 郭靖/和/哀牢山/三十六/剑/。
# Remove stop words with a generator expression
print('/'.join(word for word in words if word not in stop_words)) # 郭靖/哀牢山/三十六/剑
print('-------------------------')


"""导入停用词字典去除停用词"""
# 通过open引入停用词词典
with open(path, 'r', encoding='utf-8') as f:
    # 必须使用strip删除行末换行符\n
    stop_words2 = [line.strip() for line in f.readlines()]
print(stop_words2[:10])
print('/'.join(word for word in words if word not in stop_words2)) # 郭靖/哀牢山/三十六/剑
print('----------------------')


# Load the stop-word dictionary with pandas.
# The separator '郭靖' never occurs in the dictionary file, so each line is
# read whole into the single column 'w' — presumably a deliberate trick to
# make read_csv treat every line as one value; verify against the file.
df = pd.read_csv(path, encoding='utf-8', engine='python', sep='郭靖', names=['w'])
# print(df.head())
# print(df.w)
# Materialize the column ONCE as a set: the original rebuilt list(df['w'])
# inside the generator expression for EVERY token, making the filter O(n*m).
pd_stop_words = set(df['w'])
print('/'.join(word for word in words if word not in pd_stop_words)) # 郭靖/哀牢山/三十六/剑
print('-------------------')


"""直接导入停用词词典"""
# 导入停用词词典
analyse.set_stop_words(path)

"""分词"""
# 导入的停用词词典对jiaba.cut()函数无效
print(jieba.lcut(sentence)) # ['郭靖', '和', '哀牢山', '三十六', '剑', '。']
# 停用词词典只对jieba.analyse中的函数有效
print(analyse.extract_tags(sentence)) # ['郭靖', '哀牢山', '三十六']


