import re
import jieba
import matplotlib.pyplot as plt
import pymongo
from wordcloud import WordCloud

# One-time export (kept for reference): dumps each movie's 'quote' field from MongoDB into word_cloud.txt.
# db = pymongo.MongoClient('localhost', 27017)
# douban = db['doubantop250']
# item_info = douban['movietop250']
# f = open('word_cloud.txt', mode='w', encoding='utf-8')
# for item in item_info.find():
#     quote = item['quote']
#     f.write(quote)
# f.close()

# Build the word-frequency dict (txt6) that feeds the word cloud below.
# `with` guarantees the file handle is closed (the original left it open).
with open(r"word_cloud.txt", mode="r", encoding="utf-8") as file:
    txt1 = file.read()

# Keep only CJK ideographs (U+4E00..U+9FA5): strips punctuation, digits,
# latin letters and whitespace in one pass.
txt2 = re.sub(r"[^\u4e00-\u9fa5]", "", txt1)

# Segment into words with jieba (returns a generator of tokens) and count
# occurrences; dict.get avoids the explicit membership test of the original.
txt4 = {}
for word in jieba.cut(txt2):
    txt4[word] = txt4.get(word, 0) + 1

# txt6: word -> count, insertion-ordered by descending frequency
# (same ordering the original produced via its txt5 intermediate list).
txt6 = dict(sorted(txt4.items(), key=lambda item: item[1], reverse=True))


# Stopwords: function words and generic terms that would otherwise dominate
# the cloud ('未找到' is the placeholder written when a movie had no quote).
list1 = ['的', '是', '了', '未找到', '不要', '不是', '就是', '才', '对', '这个', '大', '小', '一个', '一部', '有', '没有', '被', '都', '在', '最', '不', '让', '不会', '就']

# Remove stopwords from the frequency table. pop(word, None) is a no-op for
# words absent from the text, where the original `del txt6[i]` raised
# KeyError as soon as any stopword did not occur in the input.
for stopword in list1:
    txt6.pop(stopword, None)

# Show the final frequency table for inspection.
print(txt6)

# Configure the renderer, then feed it the word -> count mapping.
wc = WordCloud(
    background_color="white",  # canvas colour
    font_path="simsun.ttc",    # SimSun font file — needed to render CJK glyphs
    max_words=100,             # cap on how many words are drawn
    max_font_size=100,         # largest font size used
    width=500,                 # image width in pixels
    height=500,                # image height in pixels
)
# generate_from_frequencies returns the WordCloud instance itself,
# so `wc` below is the same object the original chained call produced.
wc.generate_from_frequencies(txt6)

# Display the rendered cloud without axes.
plt.figure(figsize=(8, 8))
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()