import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
import requests
import re
import jieba
from snownlp import SnowNLP
import json
from collections import defaultdict
import matplotlib.pyplot as plt

class DoubanMovieAnalyzer:
    """Crawl Douban comments for a user-chosen movie, score each comment's
    sentiment with SnowNLP, and plot the per-month sentiment distribution.

    Pipeline: ``get_user_input`` -> ``web_url`` (resolve the comments-page URL
    via a Selenium search) -> ``first_crawl`` (fetch pages into ``{name}.json``)
    -> ``analyze_movie_comments`` (preprocess, score, plot).
    """

    def __init__(self):
        self.name = None        # movie title entered by the user
        self.url_second = None  # comments-page URL resolved from the search result

    def get_user_input(self):
        """Prompt the user for the movie title to analyze."""
        self.name = input("请输入电影名: ")

    def web_url(self):
        """Search Douban for ``self.name`` and store the comments-page URL.

        Raises:
            ValueError: if the search result page yields no movie link.
        """
        options = webdriver.ChromeOptions()
        # Hide the "browser is being controlled by automation" banner.
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument('--incognito')

        # NOTE(review): executable_path is deprecated in Selenium 4; migrate to
        # a Service object when the Selenium dependency is upgraded.
        driver = webdriver.Chrome(executable_path=r"D:\Python_files\自然语言处理\期末课设-影评情感分析\chromedriver.exe",
                                  options=options)
        try:
            driver.get("https://movie.douban.com/")
            time.sleep(1)
            # send_keys()/click() return None, so their results are not kept.
            driver.find_element(By.ID, 'inp-query').send_keys(self.name)
            driver.find_element(By.CLASS_NAME, 'inp-btn').click()
            time.sleep(3)  # wait for the search results to render
            data = driver.page_source
        finally:
            # quit() ends the whole session; a preceding close() is redundant.
            driver.quit()

        html = etree.HTML(data)
        links = html.xpath('//*[@id="root"]/div/div[2]/div[1]/div[1]/div[1]/div[1]/div/div[1]/a/@href')
        if not links:
            raise ValueError(f"no search result found for movie: {self.name}")

        url_second = links[0] + 'comments?sort=new_score&status=P'
        print(url_second)  # https://movie.douban.com/subject/30402296/comments?sort=new_score&status=P
        self.url_second = url_second

    def first_crawl(self, start):
        """Fetch one page of comments and append it to ``{self.name}.json``.

        Args:
            start: zero-based page index; the request offset is ``start * 20``.
        """
        # BUG FIX: limit is the page size and must stay 20; the original sent
        # limit=start*20, requesting a page size that grew with the page index.
        url = f"{self.url_second}&start={start * 20}&limit=20"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/119.0.0.0 Safari/537.36"
        }
        response = requests.get(url=url, headers=headers)
        html = etree.HTML(response.content.decode())

        comments_data = []
        for i in range(1, 21):  # up to 20 comments per page
            comment_time = html.xpath(f'//*[@id="comments"]/div[{i}]/div[2]/h3/span[2]/span[3]/text()')
            comment = html.xpath(f'//*[@id="comments"]/div[{i}]/div[2]/p/span/text()')
            if comment_time and comment:
                comments_data.append({
                    "评论时间": comment_time[0],
                    "评论内容": comment[0]
                })
        # BUG FIX: the original did comments_data.extend(comments_data) here,
        # which wrote every comment to the JSON file twice.
        self.append_to_json(comments_data, f'{self.name}.json')
        time.sleep(1)  # be polite to the server between pages

    @staticmethod
    def segment_and_remove_stopwords(text):
        """Tokenize *text* with jieba and drop stopwords.

        Returns a space-joined string of the remaining tokens (may be empty).
        """
        seg_list = jieba.cut(text, cut_all=False, HMM=True)
        # NOTE(review): the stopword file is re-read on every call; cache it
        # at module level if this becomes a hot path.
        with open('cn_stopwords.txt', 'r', encoding='utf-8') as stopword_file:
            stopwords = {line.strip() for line in stopword_file}
        return ' '.join(word for word in seg_list if word not in stopwords)

    @staticmethod
    def remove_empty_elements(comment_list):
        """Return *comment_list* without falsy entries (empty strings, None)."""
        return [comment for comment in comment_list if comment]

    @staticmethod
    def append_to_json(data, filename):
        """Append *data* (a list) to the JSON array stored in *filename*,
        creating the file if it does not exist yet."""
        try:
            with open(filename, 'r', encoding='utf-8') as json_file:
                existing_data = json.load(json_file)
            existing_data.extend(data)
            with open(filename, 'w', encoding='utf-8') as json_file:
                json.dump(existing_data, json_file, ensure_ascii=False, indent=2)
        except FileNotFoundError:
            with open(filename, 'w', encoding='utf-8') as json_file:
                json.dump(data, json_file, ensure_ascii=False, indent=2)

    def analyze_movie_comments(self):
        """End-to-end pipeline: crawl, preprocess, score sentiment, and plot."""
        self.get_user_input()
        self.web_url()

        for page in range(1, 2):
            self.first_crawl(page)

        # Load the crawled comments once (the original re-read the file twice).
        with open(f'{self.name}.json', 'r', encoding='utf-8') as file:
            comments_data = json.load(file)

        # Tokenize, strip stopwords, and drop comments that became empty.
        # (The original computed this list twice and discarded the first copy.)
        processed_comments = self.remove_empty_elements(
            [self.segment_and_remove_stopwords(item['评论内容']) for item in comments_data])

        for idx, comment in enumerate(processed_comments, 1):
            print(f"处理后的评论 {idx}: {comment}")

        # Group (sentiment score, comment) pairs by "YYYY-MM".
        yearly_monthly_sentiments = defaultdict(list)
        for comment in comments_data:
            match = re.search(r"(\d{4})-(\d{2})", comment["评论时间"])
            if match:
                year_month_key = f"{match.group(1)}-{match.group(2)}"
                sentiment_score = SnowNLP(comment["评论内容"]).sentiments
                yearly_monthly_sentiments[year_month_key].append(
                    (sentiment_score, comment["评论内容"]))

        # Frequency of each exact score — used as the colour value per point.
        score_frequencies = defaultdict(int)
        for sentiment_comments in yearly_monthly_sentiments.values():
            for score, _ in sentiment_comments:
                score_frequencies[score] += 1

        colors = [score_frequencies[score]
                  for sentiment_comments in yearly_monthly_sentiments.values()
                  for score, _ in sentiment_comments]

        # Configure the CJK-capable font BEFORE drawing so labels render.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.figure(figsize=(12, 8))
        # BUG FIX: slice the colour list per month at a running offset; the
        # original always sliced colors[:n], reusing the first month's colours.
        offset = 0
        for i, sentiment_comments in enumerate(yearly_monthly_sentiments.values()):
            sentiment_scores, _ = zip(*sentiment_comments)
            n = len(sentiment_scores)
            plt.scatter([i] * n, sentiment_scores,
                        c=colors[offset:offset + n], cmap='viridis',
                        alpha=0.19, s=50)
            offset += n

        plt.title('每月评论情感得分分布')
        plt.xlabel('月份')
        plt.ylabel('评论情感得分')
        plt.xticks(range(len(yearly_monthly_sentiments)),
                   list(yearly_monthly_sentiments.keys()), rotation=45)
        plt.colorbar(label='评论数量')
        plt.show()


if __name__ == '__main__':
    # Run the full crawl-and-analyze pipeline when executed as a script.
    DoubanMovieAnalyzer().analyze_movie_comments()