#  -*- coding:utf-8 -*-
"""
@ author: 罗金盛
@ time: 2023/12/7
@ file: 分析天气之子影评情绪.py
"""

# Target URL: https://movie.douban.com/subject/30402296/comments?start={}&limit=20&sort=new_score&status=P

import time
import requests
from lxml import etree
import re
import jieba
import jieba.analyse
from snownlp import SnowNLP
import matplotlib.pyplot as plt
from snownlp import SnowNLP

def crawl(film_name):
    """Search Douban for *film_name* and return the comments-page URL of the
    first matching subject, or None when the search yields no hit.

    Parameters:
        film_name: movie title to search for (may contain Chinese characters;
            requests URL-encodes it via ``params``).

    Returns:
        str | None: ``<subject-url>comments?sort=new_score&status=P`` for the
        first search result, or None if nothing was found.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
    }

    # Let requests build the query string so the (likely non-ASCII) title is
    # percent-encoded correctly instead of being pasted raw into the URL.
    response = requests.get(
        url="https://search.douban.com/movie/subject_search",
        params={"search_text": film_name, "cat": "1002"},
        headers=headers,
        timeout=10,  # don't hang forever on a stalled connection
    )
    data = response.content.decode()
    # Parse the raw HTML into an element tree.
    html = etree.HTML(data)
    # Detail-page links of the search hits. xpath() always returns a list;
    # the previous code stringified the whole list, producing an invalid URL
    # such as "['https://...']comments?...".
    hits = html.xpath('//*[@id="root"]/div/div[2]/div[1]/div[1]/div[1]/div[1]/div/div[1]/a/@href')
    if not hits:
        print(f"No search result for {film_name!r}")
        return None

    # Douban subject URLs end with a trailing slash, so plain concatenation
    # yields .../subject/<id>/comments?...
    url_second = hits[0] + 'comments?sort=new_score&status=P'
    print(url_second)
    return url_second
def first_crawl(start, movie_id="30402296"):
    """Fetch one listing page of Douban reviews and return cleaned snippets.

    Parameters:
        start: zero-based page index; each page holds 20 entries, so the
            querystring offset is ``start * 20`` (0, 20, 40, ...).
        movie_id: Douban subject id. Defaults to "30402296" (Weathering with
            You) so existing callers keep their original behavior.

    Returns:
        list[str]: review text fragments with whitespace collapsed,
        punctuation stripped, and empty entries removed.
    """
    # NOTE(review): this requests the /reviews (long-review) listing, while
    # the URL noted at the top of the file is the /comments (short-comment)
    # page -- confirm which listing the xpath below is actually meant for.
    url = f"https://movie.douban.com/subject/{movie_id}/reviews?start={start*20}"
    # Browser-like User-Agent so Douban serves the normal HTML page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
    }
    # Fetch and decode the page; bounded timeout so a stalled connection
    # cannot hang the whole crawl.
    response = requests.get(url=url, headers=headers, timeout=10)
    data = response.content.decode()
    # Parse into an element tree for xpath extraction.
    html = etree.HTML(data)
    # Positional xpath collecting every review text node on the page.
    comments = html.xpath('/html/body/div[3]/div[1]/div/div[1]/div[1]/div[*]/div/div/div[*]/div/text()')
    # Two cleaning passes are needed: the first collapses whitespace and
    # removes quotes/parentheses/non-breaking spaces, the second strips the
    # remaining punctuation and discards the empty strings that the xpath
    # extraction leaves behind.
    cleaned_data_1 = [re.sub(r'[\'"()\xa0]+', '', re.sub(r'\s+', ' ', comment).strip()) for comment in comments]
    cleaned_data_2 = [re.sub(r'[^\w\s]', '', text) for text in cleaned_data_1 if text]
    time.sleep(1)  # throttle requests so Douban does not ban our IP
    return cleaned_data_2


# Memoized stopword lists keyed by file path, so the file is read from disk
# once per process instead of once per comment (the original re-read and
# re-parsed cn_stopwords.txt on every call).
_STOPWORDS_CACHE = {}


def _load_stopwords(path='cn_stopwords.txt'):
    """Load the stopword file at *path* once and return it as a set."""
    if path not in _STOPWORDS_CACHE:
        with open(path, 'r', encoding='utf-8') as stopword_file:
            _STOPWORDS_CACHE[path] = {line.strip() for line in stopword_file}
    return _STOPWORDS_CACHE[path]


def segment_and_remove_stopwords(text):
    """Segment Chinese *text* with jieba and drop stopwords.

    Parameters:
        text: a Chinese comment/sentence to tokenize.

    Returns:
        str: the surviving tokens joined by single spaces (a plain string,
        suitable for the SnowNLP sentiment step used in ``__main__``).

    Raises:
        FileNotFoundError: if 'cn_stopwords.txt' is missing on first use.
    """
    # Precise-mode segmentation with HMM enabled for out-of-vocabulary words.
    seg_list = jieba.cut(text, cut_all=False, HMM=True)

    stopwords = _load_stopwords()

    # Keep only the tokens that are not stopwords.
    filtered_words = [word for word in seg_list if word not in stopwords]

    return ' '.join(filtered_words)


if __name__ == '__main__':
    # The full scrape -> clean -> sentiment -> plot pipeline below is kept
    # commented out so a plain run does not hammer Douban; uncomment it to
    # reproduce the original analysis.
    # movie_comment_list = []
    # for i in range(0,11):  # change page count here; only 10 pages were used in testing to limit resource use
    #     movie_comments = first_crawl(i)
    #     for movie_comment in movie_comments:
    #         movie_comment_list.append(movie_comment) # gather every comment into one list
    #
    # print(movie_comment_list)
    #
    # # Process each movie comment (segmentation + stopword removal)
    # processed_comments = [segment_and_remove_stopwords(comment) for comment in movie_comment_list]
    #
    # # Print the processed comments
    # for idx, comment in enumerate(processed_comments, 1):
    #     print(f"处理后的评论 {idx}: {comment}")
    #
    # # Run sentiment analysis per comment and record the scores
    # sentiments = []
    # for idx, comment in enumerate(processed_comments, 1):
    #     s = SnowNLP(comment)
    #     sentiment = s.sentiments
    #     sentiments.append(sentiment)
    #     print(f"评论 {idx} 情感分析结果：{sentiment}")
    #
    # # Visualize the sentiment scores
    # plt.figure(figsize=(8, 5))
    # plt.plot(range(1, len(sentiments) + 1), sentiments, marker='o', linestyle='-', color='b')
    # plt.rcParams['font.sans-serif'] = ['SimHei']
    # plt.title('情感分析结果')
    # plt.xlabel('评论编号')
    # plt.ylabel('情感得分')
    # plt.ylim(0, 1)  # clamp the y axis to [0, 1]
    # plt.grid(True)
    # plt.show()
    # Only the search helper runs by default ("心灵捕手" = Good Will Hunting).
    crawl("心灵捕手")





