from collections import Counter

import jieba
import numpy as np
import wordcloud
from matplotlib import pyplot as plt
from PIL import Image
def p(x):
    """Sort key: given a (word, count) pair, return the count."""
    _, count = x
    return count
# Read the whole essay from disk.  `with` guarantees the file handle is
# closed even on error (the original opened it and never closed it).
with open("E:\\python_code\\期末\\2.txt", 'r', encoding='UTF-8') as f:
    test = f.read()

# Precise-mode segmentation with jieba; lcut returns a list of tokens.
words = jieba.lcut(test)

# Frequency table of the tokens.  Single-character tokens are skipped:
# they are mostly punctuation or particles that cannot form a real word.
counts = Counter(word for word in words if len(word) > 1)

# All (word, count) pairs, ordered most-frequent first (used below if the
# top-10 report is re-enabled; the word cloud itself reads `counts`).
items = list(counts.items())
items.sort(key=p, reverse=True)
# print("出现次数前十的词语为：")
# for i in range(10):
#     word, count = items[i]
#     print("{0:<10}{1:>5}".format(word, count))

# Use the silhouette image as the cloud's shape mask.
mask = np.array(Image.open('111.png'))
wc = wordcloud.WordCloud(
        font_path='C:/Windows/Fonts/simhei.ttf',  # SimHei: renders CJK glyphs
        width=500, height=400,
        mask=mask,
        max_words=200,
        max_font_size=100,
        background_color='white',
        font_step=3,
        # NOTE(review): random_state is documented as an int seed or None;
        # False is silently coerced — an explicit seed (e.g. 0) would be clearer.
        random_state=False,
        prefer_horizontal=0.9)
# Build the cloud from our own frequency table so jieba's segmentation
# (not wordcloud's built-in tokenizer) drives the layout.
# (Removed leftover debug print of the whole `counts` dict.)
wc.generate_from_frequencies(counts)
# image_colors = wordcloud.ImageColorGenerator(mask)
# wc.recolor(color_func=image_colors)
plt.imshow(wc)
plt.axis('off')
plt.show()

