import collections
import re
import time
import jieba
import matplotlib.pyplot as plt
import numpy
from wordcloud import WordCloud
import pymysql
from PIL import Image
import io
import sys

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')  # force UTF-8 stdout so Chinese text prints correctly

# Fetch today's topics from each hot-list source table (a topic that stays
# on a chart may appear multiple times across scrapes).
db = pymysql.connect(host="localhost", user="root", password="root", database="resoudata")
try:
    # pymysql cursors are context managers; this guarantees the cursor is
    # closed even when a query raises.
    with db.cursor() as cursor:
        cursor.execute("select topic from t_weibohot where daydate=CURDATE()")
        wbresult = cursor.fetchall()
        cursor.execute("select topic from t_jinrihot where daydate=CURDATE()")
        jrresult = cursor.fetchall()
        cursor.execute("select topic from t_txhot where daydate=CURDATE()")
        txresult = cursor.fetchall()
        cursor.execute("select topic from t_baiduhot where daydate=CURDATE()")
        bdresult = cursor.fetchall()
finally:
    # Always release the connection; the original leaked both cursor and
    # connection (neither was ever closed).
    db.close()

# Persist today's topics to topic.txt (one per line) and read the full text
# back for processing.  Mode "w" already truncates the file, so the original
# separate "clear the file" open/close pass was redundant (and lacked an
# encoding argument).
with open('topic.txt', encoding='utf-8', mode='w') as f:
    # Each result set is a tuple of 1-tuples from cursor.fetchall();
    # "".join(row) unwraps the single topic string.
    for resultset in (wbresult, jrresult, txresult, bdresult):
        for row in resultset:
            f.write("".join(row) + "\n")
with open('topic.txt', encoding='utf-8') as f:
    data = f.read()

# Text preprocessing: keep only runs of Chinese characters (CJK Unified
# Ideographs), space-joined.  The re.S (DOTALL) flag only affects '.', which
# this pattern does not use, so it is dropped.
new_data = " ".join(re.findall('[\u4e00-\u9fa5]+', data))

# Segment the text with jieba (precise mode) and count word frequencies.
# Single-character tokens are dropped — they are mostly particles and add
# noise to the cloud.
# Alternative modes, if ever needed:
#   jieba.cut(new_data, cut_all=True)   # full mode
#   jieba.cut_for_search(new_data)      # search-engine mode
result_list = [word for word in jieba.cut(new_data, cut_all=False) if len(word) > 1]
word_counts = collections.Counter(result_list)

# Render the word cloud, shaped by a mask image.
color_mask = numpy.array(Image.open('src/main/resources/static/img/hot.jpg'))
my_cloud = WordCloud(
    background_color='white',  # canvas colour
    mask=color_mask,           # shape mask taken from the background image
    scale=20,                  # up-scaling factor for a sharper output image
    max_words=400,             # maximum number of words displayed
    font_path='simhei.ttf',    # CJK-capable font so Chinese glyphs render
    max_font_size=99,          # largest font size
    min_font_size=16,          # smallest font size
    random_state=50            # fixed seed -> reproducible colour layout
).generate_from_frequencies(word_counts)

# Preview setup: draw the cloud with no axes.  NOTE(review): plt.show() is
# never called, so nothing is displayed — the image is only saved below.
plt.imshow(my_cloud, interpolation='bilinear')
plt.axis('off')

# Save the cloud as an image named after today's date.
date = time.strftime('%Y-%m-%d', time.localtime())
try:
    my_cloud.to_file('E:/vueprojects/hottop/src/assets/wordcloud/' + date + '.jpg')
    print("词云图生成成功")
except Exception as ex:
    # Narrowed from BaseException, which would also swallow
    # KeyboardInterrupt and SystemExit; best-effort reporting is kept.
    print(ex)