import requests
from lxml import etree
import pandas as pd
import time
from snownlp import SnowNLP
import jieba
from jieba import analyse
import matplotlib.pyplot as plt
from matplotlib import font_manager
import numpy as np
from PIL import Image
import wordcloud

# Limit how many columns pandas prints when a DataFrame is displayed.
pd.set_option('display.max_columns', 5)  # show at most 5 columns


def getData():
    """Scrape Douban short comments for Titanic (subject 1292722) and append
    them to titanic.csv as tab-separated rows: author, short comment, votes.

    NOTE(review): the cookie below is a personal session token and will
    expire — refresh it before re-running.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36",
        "cookie": 'll="118282"; bid=IIz3jruMyTg; __utmc=30149280; __utmz=30149280.1669209596.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmc=223695111; __utmz=223695111.1669209596.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __gads=ID=abfc4019aa179765-22540dbea3d8005e:T=1669209596:RT=1669209596:S=ALNI_MYsudJCEc6aHeVQYZDztdQlv9L-PQ; _vwo_uuid_v2=D08E687E19072534C979F8B08473C540B|68b866978ca87fc731d53f3453947e6b; __yadk_uid=yl0zb02E11DWIWEClSQ6NCtH1L3kt6ch; ap_v=0,6.0; __gpi=UID=00000b82119b51f3:T=1669209596:RT=1669249526:S=ALNI_MbhmLLhCjjZzTfLNi0uijXzOmCj4A; dbcl2="264707283:Q8AXOvBj4Lc"; ck=W9Q4; push_noty_num=0; push_doumail_num=0; __utmv=30149280.26470; __utma=30149280.652306454.1669209596.1669249526.1669253232.4; __utmb=30149280.0.10.1669253232; __utma=223695111.1753308218.1669209596.1669249526.1669253232.4; __utmb=223695111.0.10.1669253232; _pk_ref.100001.4cf6=["","",1669253232,"https://www.baidu.com/link?url=ahKQHJzKaGNB9Obx1QdIerkup-IL4bzDSwX-vUJrGp5d88VkHz9GWS1VvgBldPin&wd=&eqid=a8a99d450001ab3800000006637e1df8"]; _pk_ses.100001.4cf6=*; _pk_id.100001.4cf6=422c2adc998f6e8b.1669209596.4.1669253239.1669250111.'
    }
    for i in range(30):  # 30 pages x 20 comments per page
        url = f"https://movie.douban.com/subject/1292722/comments?start={i * 20}&limit=20&status=P&sort=new_score"
        # timeout so a stalled connection cannot hang the scraper forever
        response = requests.get(url, headers=headers, timeout=10)
        response.encoding = 'utf-8'

        html = etree.HTML(response.text)
        comments = html.xpath('//div[@id="comments"]/div[@class="comment-item "]')

        for comment in comments:
            # BUGFIX: the original used absolute paths ('//span[...]') here,
            # which lxml evaluates against the WHOLE document — every row got
            # the first comment's vote/short text. Relative './/' paths search
            # only inside the current comment element.
            authors = comment.xpath('div[@class="avatar"]/a/@title')
            votes = comment.xpath('.//span[contains(@class, "votes") and contains(@class, "vote-count")]/text()')  # 有用数
            shorts = comment.xpath('.//span[@class="short"]/text()')  # 短评
            if not (authors and votes and shorts):
                continue  # skip malformed entries instead of raising IndexError
            with open('titanic.csv', 'a', encoding='utf-8') as f:
                f.write(f"{authors[0]}\t{shorts[0]}\t{votes[0]}\n")
        print(f"已下载第{i + 1}页数据 ================== ")
        # time.sleep(1)  # optionally throttle to be polite to the server


def convert(data: str):
    """Run SnowNLP sentiment analysis on *data* and return its score."""
    analyzer = SnowNLP(data)
    return analyzer.sentiments


def csv2dfNLP(src: str = 'titanic.csv', dst: str = 'titanicNLP.csv'):
    """Load the scraped comments, score each one's sentiment with SnowNLP,
    sort by sentiment (descending) and save the result.

    :param src: path of the tab-separated input file (author, short, vote),
        written by getData() with no header row.
    :param dst: path of the tab-separated output file, which additionally
        carries a 'sentiment' column.
    :return: None
    """
    # header=None: the raw file has no header row; names= supplies one.
    df = pd.read_csv(src, sep='\t', header=None, names=['author', 'short', 'vote'])
    # Sentiment score per comment, as produced by SnowNLP via convert().
    df['sentiment'] = df['short'].apply(convert)
    # Most-positive comments first; inplace=True modifies df directly.
    df.sort_values(by='sentiment', ascending=False, inplace=True)
    df.to_csv(dst, sep='\t', index=False)  # index=False: omit the row index
    # print(df[:10])
    # print(df[-10:])


def jiebaCutVisualization():
    """Tokenize all comments with jieba, extract the top-500 keywords by
    TF-IDF weight, then render (1) a horizontal bar chart of the top 20
    keywords and (2) a word cloud masked by titanic.webp (saved to
    titanicWordCloud.png).
    """
    df = pd.read_csv('titanic.csv', sep='\t', header=None, names=['author', 'short', 'vote'])

    # Join every comment into one string. dropna() first: pandas reads
    # missing cells as float NaN, which would make str.join raise TypeError.
    # '|' keeps comment boundaries visible in the joined text.
    datas = '|'.join(str(d) for d in df['short'].dropna().tolist())
    # jieba.cut returns a generator of tokens; join them space-separated.
    words = ' '.join(jieba.cut(datas))
    # Top 500 keywords with their TF-IDF weights: [(word, weight), ...]
    tags = analyse.extract_tags(words, topK=500, withWeight=True)
    df2 = pd.DataFrame(tags, columns=["word", "weight"])

    # Raw string: in a normal literal '\W' / '\M' are invalid escape
    # sequences (DeprecationWarning); r"..." keeps the path bytes identical.
    font_path = r"C:\Windows\Fonts\MSYHL.TTC"
    _plot_top_words(df2, font_path)
    _plot_word_cloud(dict(tags), font_path)


def _plot_top_words(df2, font_path, top_n=20):
    """Draw a horizontal bar chart of the *top_n* heaviest keywords.

    A CJK-capable font is required, otherwise Chinese labels render as
    empty boxes.
    """
    my_font = font_manager.FontProperties(fname=font_path)
    plt.figure(figsize=(12, 6), dpi=120)
    # [::-1] so the heaviest keyword ends up at the TOP of the chart
    # (barh draws y=0 at the bottom).
    plt.barh(y=np.arange(0, top_n), width=df2[:top_n]['weight'][::-1], color='blue')
    plt.ylabel('weight')
    plt.yticks(range(top_n), df2[:top_n]['word'][::-1], fontproperties=my_font)
    # Hide the top/right plot borders.
    ax = plt.gca()
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    # plt.savefig('./titanic.png')
    # plt.show()


def _plot_word_cloud(freqs, font_path):
    """Render a word cloud from a {word: weight} mapping, shaped by the
    titanic.webp mask image, and save it to titanicWordCloud.png."""
    mask = np.array(Image.open('titanic.webp'))  # white pixels are excluded
    cloud = wordcloud.WordCloud(font_path=font_path,
                                width=1200,
                                height=968,
                                background_color="white",
                                mask=mask,
                                max_words=500,
                                max_font_size=150)
    word_cloud = cloud.generate_from_frequencies(freqs)
    plt.figure(figsize=(12, 12), dpi=200)
    plt.imshow(word_cloud)
    plt.savefig('./titanicWordCloud.png')
    plt.show()


if __name__ == '__main__':
    # Pipeline: getData() -> csv2dfNLP() -> jiebaCutVisualization().
    # Earlier stages are commented out once their output files exist on disk.
    # getData()
    # csv2dfNLP()
    jiebaCutVisualization()
