import jieba

# Load the full text of the novel; the context manager closes the file
# automatically, even if reading fails. GB18030 is the superset encoding
# commonly used for Chinese text files.
with open('西游记.txt', 'r', encoding='GB18030') as f:
    s = f.read()

# Stop-word list: high-frequency function words and filler phrases that
# would otherwise crowd out the character names in the final ranking.
excludes = {
    "一个", "这个", "那里", "怎么", "我们", "不知", "两个", "甚么",
    "只见", "不是", "原来", "不敢", "闻言", "如何", "什么", "不曾",
}

# Segment the entire text into a flat list of word tokens.
list1 = jieba.lcut(s)
# 遍历列表，统计同词出现次数
# Alternate names for the four protagonists, mapped to one canonical name
# so that all aliases are tallied together.
ALIASES = {
    '行者': '悟空', '大圣': '悟空', '老孙': '悟空',
    '师父': '唐僧', '三藏': '唐僧', '长老': '唐僧',
    '悟净': '沙僧', '沙和尚': '沙僧',
    '老猪': '八戒', '猪八戒': '八戒', '呆子': '八戒',
}

# Tally each (canonicalized) token: word -> number of occurrences.
# Single-character tokens are skipped — they are mostly particles and
# punctuation-adjacent noise rather than meaningful words.
counts = {}
for word in list1:
    if len(word) > 1:
        canonical = ALIASES.get(word, word)
        counts[canonical] = counts.get(canonical, 0) + 1
# Debug aid: dump the raw frequency table before stop-word filtering.
print(counts)
# Drop stop words. pop(word, None) tolerates excluded words that never
# occurred in the text — the original `del counts[word]` would raise
# KeyError for any missing key.
for word in excludes:
    counts.pop(word, None)
# Sort (word, count) pairs by frequency, most frequent first.
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)
print(items)
# Print the ten most frequent words — or all of them if fewer than ten
# survive filtering (the original range(10) indexing would raise
# IndexError on a short list). NOTE: the loop variable is `count`, not
# `counts` — the original shadowed and clobbered the counts dict here.
for word, count in items[:10]:
    print(f'{word}\t{count}')
