import requests
import json
import re
import jieba
import pandas as pd
import numpy
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from os import path
import numpy as np
from PIL import Image
# Data-crawling module (project-local DB access layer for scraped reviews)
from com.py.test.reptleDolinGoodMsg.RequestReptle.TMReptle.TM_DbOperate import TM_DbOperate


def get_comments():
    """Collect every review text from the tb_a_assess table as one string.

    Returns:
        str: the concatenation of all row contents. Rows are stringified
        exactly as the DB layer returns them, so tuple formatting may leak
        into the output — TODO confirm execBySql's row shape.
    """
    db = TM_DbOperate()
    rows = db.execBySql("SELECT tb.content FROM tb_a_assess tb")
    print(len(rows))  # sanity check: number of reviews fetched
    # str.join avoids the quadratic cost of repeated += concatenation.
    return "".join(str(row) for row in rows)
# Data cleaning / word-frequency module
def data_clear():
    """Clean the raw comment text and count word frequencies.

    Pipeline: keep only Chinese characters, segment with jieba, drop
    stopwords, then count occurrences of each remaining token.

    Returns:
        pd.DataFrame: columns ['clear', 'num'] (token, frequency),
        sorted by 'num' in descending order.
    """
    raw_text = get_comments()
    print(len(raw_text))  # sanity check: total characters of raw input
    # Keep only CJK unified ideographs; digits, punctuation, latin
    # letters and emoji are all stripped.
    chinese_only = ''.join(re.findall(r'[\u4e00-\u9fa5]+', raw_text))
    tokens = jieba.lcut(chinese_only)  # word segmentation
    words = pd.DataFrame({'clear': tokens})
    # NOTE(review): stopword list is resolved relative to the CWD and
    # assumed GBK-encoded — confirm both hold in deployment.
    stopwords = pd.read_csv(
        "chineseStopWords.txt", index_col=False, quoting=3,
        sep="\t", names=['stopword'], encoding='GBK',
    )
    words = words[~words.clear.isin(stopwords.stopword)]
    # value_counts replaces the groupby/size/to_frame/sort pipeline:
    # it already yields counts in descending order.
    count_words = (
        words['clear']
        .value_counts()
        .rename_axis('clear')
        .reset_index(name='num')
    )
    return count_words

# Word-cloud display module
def make_wordclound():
    """Render the 200 most frequent comment words as a word cloud.

    Builds word frequencies via data_clear(), fits them into a WordCloud
    image, and shows the result with matplotlib.
    """
    base_dir = path.dirname(__file__)
    # Optional shape mask (currently disabled):
    # mask = np.array(Image.open(path.join(base_dir, "me.jpg")))  # pass mask=mask below
    top_rows = data_clear().head(200).values
    frequencies = {word: count for word, count in top_rows}
    # Font, colors and canvas size for the rendered cloud.
    cloud = WordCloud(
        font_path="simhei.ttf",
        background_color="#EEEEEE",
        max_font_size=250,
        width=1300,
        height=800,
    )
    cloud = cloud.fit_words(frequencies)
    plt.imshow(cloud)
    plt.axis("off")
    plt.show()

if __name__=="__main__":
    make_wordclound()
    print("finish")