from collections import Counter

from docx import Document
import jieba
import matplotlib.pyplot as plt
from wordcloud import WordCloud

# Load the .docx file to analyze (relative path).
# Raw string: the original non-raw literal relied on '\T' / '\小' not being
# recognized escapes, which is fragile and deprecated in modern Python.
doc = Document(r'Python\Test01\小学学科课程资源开发与应用.docx')

# Gather every paragraph's text and join into one newline-separated string.
paragraphs = [para.text for para in doc.paragraphs]
text = '\n'.join(paragraphs)

# Segment the Chinese text into words (jieba precise mode).
sall = list(jieba.cut(text, cut_all=False))
print(sall)

# Stop-word filtering: drop punctuation and common Chinese function words.
# A set gives O(1) membership tests instead of scanning a list for every
# token (and de-duplicates the '也' that appeared twice in the original).
STOPWORDS = {
    '1', '：', '的', ',', '，', '、', '[', ']', '(', ')', '.', '。', '”', '\n',
    '“', ' ', ':', 'J', '-', '；', '—', '～', '《', '》', '和', '在', '与', '了',
    '中', '即', '对', '为', '是', '上', '也', '强调', '以', '以及', '做',
}
sall = [s for s in sall if s not in STOPWORDS]

# Count word frequencies and keep the 50 most common.
# Counter does this in a single O(n) pass; the previous
# set + list.count() approach rescanned the list per unique word (O(n^2)).
lunwen_keywords = Counter(sall)

# most_common(50) returns (word, count) pairs sorted by count, descending —
# equivalent to sorting items() with reverse=True and slicing [:50].
lunwen_keywords_top50 = dict(lunwen_keywords.most_common(50))
# print(lunwen_keywords_top50)

# Render the word cloud from the top-50 frequencies.
# font_path must point at a CJK-capable font (Microsoft YaHei here) or the
# Chinese glyphs render as boxes. Raw string: '\m' and '\T' in the original
# non-raw literal are invalid escapes, deprecated in modern Python.
wordcloud = WordCloud(
    font_path=r'Python\Test01\msyh.ttc',
    width=500,
    height=300,
    background_color='white',
).generate_from_frequencies(lunwen_keywords_top50)

plt.figure(figsize=(10, 6))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')  # hide the axes — the image is the whole plot
plt.show()

