from collections import Counter

import jieba
import matplotlib.pyplot as plt
import wordcloud

# Read the novel's text file (UTF-8) and segment it into word tokens.
with open('xijuzi.txt', 'r', encoding='utf-8') as f:
    text = f.read()
# Precise-mode segmentation; returns a list of tokens.
words = jieba.lcut(text)

# Drop punctuation and common function words that carry no meaning.
# A set gives O(1) membership tests per token (the original list cost O(n)
# per lookup and contained a duplicate '到').
stopwords = {
    '，', '。', '、', '“', '”', '！', '？', '《', '》', '：', '；', '（', '）',
    '的', '了', '和', '是', '在', '我', '一', '有', '不', '他', '这', '也',
    '就', '人', '来', '到', '候', '那', '被', '去', '为',
}
words = [word for word in words if word not in stopwords]

# Count word frequencies. Counter replaces the manual dict-accumulation loop;
# it is a dict subclass, so generate_from_frequencies accepts it unchanged.
word_freq = Counter(words)

# Build the word cloud from the frequency table and display it via matplotlib.
cloud = wordcloud.WordCloud(
    font_path='msyh.ttf',  # a CJK-capable font is required to render Chinese glyphs
    background_color='white',
    width=1600,
    height=800,
)
wc = cloud.generate_from_frequencies(word_freq)
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()