import jieba
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import jieba.analyse
from pyquery import PyQuery
import pymysql

def get_content():
    """Fetch the weibo texts recorded for the most recent timestamp.

    Reads the reference timestamp from ``message.time1``, selects every
    ``context`` row in ``tb_weibos`` with that timestamp, mirrors them to
    ``record.txt`` (one per line, CRLF-terminated), and returns the whole
    file content as a single string.

    Returns:
        str: contents of ``record.txt`` (all matched weibo texts).

    Raises:
        pymysql.MySQLError: on connection/query failure.
        IndexError/TypeError: if the ``message`` table is empty.
    """
    db = pymysql.connect(host='localhost', user="root", passwd="123456", db="mysql", port=3306, charset="utf8")
    try:
        with db.cursor() as cursor:
            cursor.execute("SELECT time1 FROM message")
            # First column of the first row is the reference timestamp.
            time11 = cursor.fetchone()[0]

        with db.cursor() as cursor:
            # Parameterized query instead of string concatenation:
            # avoids SQL injection and quoting bugs in the timestamp value.
            cursor.execute("select context from tb_weibos where time=%s", (str(time11),))
            rows = cursor.fetchall()
    finally:
        db.close()

    # Mirror the texts to a local file (kept for parity with the original
    # behavior; downstream tooling may read record.txt directly).
    with open('record.txt', 'w', encoding='utf-8') as f:
        for row in rows:
            f.write(str(row[0]))
            f.write('\r\n')

    with open('record.txt', 'r', encoding='utf-8') as f:
        return f.read()

#jieba.enable_parallel(4) # 开启并行分词模式，参数为并行进程数 



def stopwordslist(filepath):
    """Load a stopword list from *filepath* (UTF-8, one word per line).

    Each line is stripped of surrounding whitespace; blank lines yield
    empty strings, matching the original behavior.

    Args:
        filepath: path to the stopword file.

    Returns:
        list[str]: stripped lines in file order.
    """
    # Context manager guarantees the file handle is closed (the original
    # leaked it); iterating the file directly avoids readlines().
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]

def movestopwords(sentence):
    """Filter stopwords and single-character tokens out of *sentence*.

    Args:
        sentence: iterable of tokens (e.g. the generator from jieba.cut).

    Returns:
        list[str]: tokens longer than one character that are not in
        ``stop_words.txt``.
    """
    # Build a set once: membership tests drop from O(len(stopwords))
    # per token to O(1).
    stopwords = set(stopwordslist('stop_words.txt'))  # path of the stopword file
    return [x for x in sentence if len(x) > 1 and x not in stopwords]

def main_wcd():
    """Build and display a word cloud from the latest weibo texts.

    Pipeline: load a user dictionary for jieba, strip HTML tags from the
    DB-sourced text via PyQuery, segment it, remove stopwords, then render
    a word cloud with matplotlib and save it as a PNG.

    Side effects: shows a matplotlib window and writes the cloud image to
    a hard-coded path (NOTE(review): consider making the output path
    configurable).
    """
    jieba.load_userdict('自定义词库.txt')  # load external user dictionary
    words = jieba.cut(PyQuery(get_content()).text())  # PyQuery().text() strips HTML tags
    word_list = movestopwords(words)  # drop stopwords and 1-char tokens
    words_split = " ".join(word_list)  # join tokens into one space-separated string

    wc = WordCloud(
        # Keep single words; don't pair frequent bigrams.
        collocations=False,
        # A CJK-capable font is required, otherwise glyphs render as boxes.
        font_path=r'.\simhei.ttf',
        background_color='white',
        width=500,
        height=350,
        max_font_size=50,
        min_font_size=10,
    )

    my_wordcloud = wc.generate(words_split)  # layout cloud by word frequency
    plt.imshow(my_wordcloud)
    plt.axis("off")  # hide the axes around the image
    plt.show()
    # Persist the image for the web frontend.
    wc.to_file('E:\\pycharm\\Demo\\vue_flask\\app\\src\\assets\\day_cloud.png')

# Run the full word-cloud pipeline only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    main_wcd()