#  -*- coding:utf-8 -*- 
"""
@ author: 罗金盛
@ time: 2023/12/14 
@ file: demo.py

"""
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
import requests
import re
import jieba
import jieba.analyse
from snownlp import SnowNLP
import matplotlib.pyplot as plt
from snownlp import SnowNLP
import json
from collections import defaultdict


def web_url():
    """Search Douban for the movie named by the global ``name`` and return
    the URL of its comments page (sorted by new_score).

    Drives a Chrome browser through the Douban search box, takes the first
    search result's link, and appends the comments-page query string.

    Returns:
        str: e.g. ``https://movie.douban.com/subject/30402296/comments?sort=new_score&status=P``

    NOTE: depends on the module-level global ``name`` set in ``__main__``.
    """
    options = webdriver.ChromeOptions()
    # Hide the "Chrome is being controlled by automated software" banner.
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    options.add_argument('--incognito')

    driver = webdriver.Chrome(executable_path=r"D:\Python_files\自然语言处理\情感分析-天气之子评论\chromedriver.exe", options=options)
    driver.get("https://movie.douban.com/")
    time.sleep(1)
    # Type the movie name into the search box and submit; send_keys()/click()
    # return None, so binding their result (as the original did) is useless.
    driver.find_element(By.ID, 'inp-query').send_keys(name)
    driver.find_element(By.CLASS_NAME, 'inp-btn').click()
    time.sleep(3)  # give the result page time to render

    data = driver.page_source
    driver.quit()  # quit() already closes every window; a prior close() is redundant

    html = etree.HTML(data)
    # First search result's link = the movie's detail-page URL.
    url_second = html.xpath('//*[@id="root"]/div/div[2]/div[1]/div[1]/div[1]/div[1]/div/div[1]/a/@href')

    url_second = url_second[0]+'comments?sort=new_score&status=P'
    print(url_second) # https://movie.douban.com/subject/30402296/comments?sort=new_score&status=P
    return url_second

"""
https://movie.douban.com/subject/30402296/comments?&status=P&sort=new_score
https://movie.douban.com/subject/30402296/comments?sort=new_score&status=P&start=20&limit=20
"""
def first_crawl(start, url_second):
    """Fetch one page of comments (20 per page) and append them to ``{name}.json``.

    Args:
        start: 1-based page index; the Douban page offset is ``start * 20``.
        url_second: base comments-page URL returned by ``web_url``.

    NOTE: uses the module-level global ``name`` for the output filename.
    """
    # Each page is selected by the ``start`` offset (a multiple of 20);
    # ``limit`` is the fixed page size of 20.
    # Bug fix: the original sent ``limit={start*20}``, which does not match
    # the real URL pattern documented above (…&start=20&limit=20).
    url = url_second + f"&start={start*20}&limit=20"
    # Plain browser User-Agent so the request is not rejected as a bot.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
    }
    response = requests.get(url=url, headers=headers)
    data = response.content.decode()
    html = etree.HTML(data)

    comments_data = []
    # Up to 20 comment entries per page; skip entries missing time or text.
    for i in range(1, 21):
        comment_time = html.xpath(f'//*[@id="comments"]/div[{i}]/div[2]/h3/span[2]/span[3]/text()')
        comment = html.xpath(f'//*[@id="comments"]/div[{i}]/div[2]/p/span/text()')
        if comment_time and comment:
            comments_data.append({
                "评论时间": comment_time[0],
                "评论内容": comment[0]
            })
    # Bug fix: the original called ``comments_data.extend(comments_data)``,
    # which duplicated every comment before saving.
    append_to_json(comments_data, f'{name}.json')
    time.sleep(1)  # be polite to the server between pages



def segment_and_remove_stopwords(text):
    """Segment Chinese *text* with jieba and drop stopwords.

    Args:
        text: raw comment string.

    Returns:
        str: the remaining words joined with single spaces (may be empty
        if every token was a stopword).

    The stopword list is read from ``cn_stopwords.txt`` once and cached on
    the function — the original re-read the file on every call, which is
    wasteful when processing hundreds of comments.
    """
    stopwords = getattr(segment_and_remove_stopwords, '_stopwords', None)
    if stopwords is None:
        stopwords = set()
        with open('cn_stopwords.txt', 'r', encoding='utf-8') as stopword_file:
            for line in stopword_file:
                stopwords.add(line.strip())
        segment_and_remove_stopwords._stopwords = stopwords

    # Precise-mode segmentation with HMM for out-of-vocabulary words.
    seg_list = jieba.cut(text, cut_all=False, HMM=True)
    return ' '.join(word for word in seg_list if word not in stopwords)


def remove_empty_elements(comment_list):
    """Return *comment_list* with every falsy entry (empty string, None, ...) removed."""
    return list(filter(None, comment_list))

def append_to_json(data, filename):
    """Append *data* (a list of records) to the JSON array stored in *filename*.

    Args:
        data: list of JSON-serializable items to append.
        filename: path of a JSON file holding a top-level list; created if
            it does not exist.

    The ``try`` body is kept minimal (EAFP): only the read can raise the
    FileNotFoundError we handle — the original wrapped the rewrite in the
    same ``try``, which was broader than necessary and duplicated the dump.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as json_file:
            existing_data = json.load(json_file)
    except FileNotFoundError:
        # Missing file: start from the new data alone.
        existing_data = list(data)
    else:
        existing_data.extend(data)
    with open(filename, 'w', encoding='utf-8') as json_file:
        json.dump(existing_data, json_file, ensure_ascii=False, indent=2)


if __name__ == '__main__':
    name = input("请输入电影名: ")
    url_second = web_url()

    # Crawl comment pages 1..49 (20 comments each); first_crawl appends the
    # results to {name}.json. Lower the upper bound to crawl fewer pages.
    for page in range(1, 50):
        first_crawl(page, url_second)

    # Load everything that was crawled. (The original read this file twice
    # into two identical variables; once is enough.)
    with open(f'{name}.json', 'r', encoding='utf-8') as file:
        data = json.load(file)

    movie_comment_list = [item['评论内容'] for item in data]

    # Segment each comment, drop stopwords, and discard comments that end up
    # empty. (The original ran the expensive segmentation pass twice and
    # threw the first result away.)
    processed_comments = remove_empty_elements(
        [segment_and_remove_stopwords(comment) for comment in movie_comment_list])

    for idx, comment in enumerate(processed_comments, 1):
        print(f"处理后的评论 {idx}: {comment}")

    # Per-comment sentiment score (SnowNLP: 0 ≈ negative … 1 ≈ positive).
    sentiments = []
    for idx, comment in enumerate(processed_comments, 1):
        sentiment = SnowNLP(comment).sentiments
        sentiments.append(sentiment)
        print(f"评论 {idx} 情感分析结果：{sentiment}")

    # Group (score, comment) pairs by the "YYYY-MM" prefix of the timestamp.
    # Sentiment here is computed on the raw comment text, as the original did.
    year_month_re = re.compile(r"(\d{4})-(\d{2})")
    yearly_monthly_sentiments = defaultdict(list)
    for comment in data:
        match = year_month_re.search(comment["评论时间"])
        if match:
            year_month_key = f"{match.group(1)}-{match.group(2)}"
            sentiment_score = SnowNLP(comment["评论内容"]).sentiments
            yearly_monthly_sentiments[year_month_key].append(
                (sentiment_score, comment["评论内容"]))

    # Frequency of each exact score, used as the scatter color so that
    # common scores render darker.
    score_frequencies = defaultdict(int)
    for sentiment_comments in yearly_monthly_sentiments.values():
        for score, _ in sentiment_comments:
            score_frequencies[score] += 1

    # Configure the CJK-capable font BEFORE any text objects are created,
    # otherwise the Chinese title/labels can render as boxes.
    plt.rcParams['font.sans-serif'] = ['SimHei']

    # One scatter column per month; color encodes how common each score is.
    plt.figure(figsize=(12, 8))
    for i, (year_month, sentiment_comments) in enumerate(yearly_monthly_sentiments.items()):
        sentiment_scores, _comments = zip(*sentiment_comments)
        # Bug fix: the original sliced one flat color list from index 0 for
        # every month, so all months reused the first month's colors; the
        # frequency must be looked up per month's own scores.
        colors = [score_frequencies[score] for score in sentiment_scores]
        plt.scatter([i] * len(sentiment_scores), sentiment_scores,
                    c=colors, cmap='viridis', alpha=0.1, s=50)
    plt.title('每月评论情感得分分布')
    plt.xlabel('月份')
    plt.ylabel('评论情感得分')
    plt.xticks(range(len(yearly_monthly_sentiments)),
               list(yearly_monthly_sentiments.keys()), rotation=45)
    plt.colorbar(label='评论数量')
    plt.show()












