import jieba

# Count word frequencies in "Journey to the West" (西游记), merging the
# alternate names of the main characters into one canonical name each,
# dropping common function words, and printing the 15 most frequent words.
with open("西游记.txt", "r", encoding="utf-8") as f:  # with-block guarantees the file is closed
    txt = f.read()
excludes = {'一个', '那里', '怎么', '我们', '不知', '两个', '甚么', '只见', '不是', '原来', '不敢', '闻言', '如何',
            '什么'}
# Alias -> canonical character name (行者/大圣/老孙 are all Sun Wukong, etc.).
aliases = {
    '行者': '悟空', '大圣': '悟空', '老孙': '悟空',
    '师父': '唐僧', '三藏': '唐僧', '长老': '唐僧',
    '悟净': '沙僧', '沙和尚': '沙僧',
}
words = jieba.lcut(txt)
counts = {}
for word in words:
    if len(word) == 1:
        continue  # skip single-character tokens (mostly particles and punctuation)
    rword = aliases.get(word, word)
    # BUG FIX: the original incremented counts[word], silently discarding the
    # alias merge computed above; count the canonical name instead.
    counts[rword] = counts.get(rword, 0) + 1
for word in excludes:
    counts.pop(word, None)  # pop(..., None) avoids KeyError when a stop word never occurred
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)
for word, count in items[:15]:  # slicing guards against fewer than 15 distinct words
    print(f"{word:<10}:{count:>5}")
# Keep only the 15 most frequent entries, then join their words with a
# single space so WordCloud can split the string back into tokens.
items = items[:15]
wl_split = ' '.join(entry[0] for entry in items)
# Generate the word cloud from the space-separated word list and save it.
from wordcloud import WordCloud

cloud = WordCloud(font_path='simhei.ttf').generate(wl_split)
cloud.to_file("西游记.png")
# Display the rendered word cloud with matplotlib (axes hidden).
import matplotlib.pyplot as plt

plt.imshow(cloud)
plt.axis('off')
plt.show()
