#  -*- coding:utf-8 -*- 
"""
@ author: 罗金盛
@ time: 2023/12/19 
@ file: 可视化界面版本.py

"""

import tkinter as tk
from tkinter import messagebox
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from threading import Thread
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
import requests
import re
import jieba
from snownlp import SnowNLP
import json
from collections import defaultdict




class DoubanMovieAnalyzer:
    """Crawl short comments for a Douban movie, persist them to JSON, and
    draw a per-month sentiment-score scatter chart with matplotlib.

    Typical flow (driven by the GUI): ``analyze_movie_comments`` resolves the
    comment-listing URL via Selenium, crawls a few pages with ``requests``,
    then the caller invokes ``generate_chart`` to build the figure.
    """

    # Lazily-loaded, class-wide stop-word cache so the stop-word file is
    # read once instead of once per comment.
    _stopwords = None

    def __init__(self):
        # Movie title typed by the user; also used as the JSON file stem.
        self.name = None
        # Short-comment listing URL resolved from the first search hit.
        self.url_second = None

    def web_url(self):
        """Search Douban for ``self.name`` with Selenium and store the
        short-comment listing URL in ``self.url_second``."""
        options = webdriver.ChromeOptions()
        # Hide the "controlled by automated software" infobar.
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument('--incognito')

        driver = webdriver.Chrome(executable_path=r"D:\Python_files\自然语言处理\期末课设-影评情感分析\chromedriver.exe",
                                  options=options)
        try:
            driver.get("https://movie.douban.com/")
            time.sleep(1)
            driver.find_element(By.ID, 'inp-query').send_keys(self.name)
            driver.find_element(By.CLASS_NAME, 'inp-btn').click()
            time.sleep(3)  # crude wait for the search-result page to render
            data = driver.page_source
        finally:
            # quit() ends the whole session (all windows + driver process);
            # the separate close() call the original made first is redundant.
            driver.quit()

        html = etree.HTML(data)
        url_second = html.xpath('//*[@id="root"]/div/div[2]/div[1]/div[1]/div[1]/div[1]/div/div[1]/a/@href')

        # First search hit -> its short-comment listing, hottest first.
        url_second = url_second[0] + 'comments?sort=new_score&status=P'
        print(url_second)  # https://movie.douban.com/subject/30402296/comments?sort=new_score&status=P
        self.url_second = url_second

    def first_crawl(self, start):
        """Fetch one page of short comments and append them to ``{name}.json``.

        :param start: page index; page N starts at comment offset N*20.
        """
        # BUG FIX: ``limit`` is the fixed page size (20 on Douban); the
        # original passed ``start * 20`` for it as well.
        url = f"{self.url_second}&start={start * 20}&limit=20"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/119.0.0.0 Safari/537.36"
        }
        response = requests.get(url=url, headers=headers)
        data = response.content.decode()
        html = etree.HTML(data)

        comments_data = []
        # A comment page holds up to 20 entries (div[1] .. div[20]); the
        # original loop (range(1, 2)) scraped only the first one per page.
        for i in range(1, 21):
            comment_time = html.xpath(f'//*[@id="comments"]/div[{i}]/div[2]/h3/span[2]/span[3]/text()')
            comment = html.xpath(f'//*[@id="comments"]/div[{i}]/div[2]/p/span/text()')
            if comment_time and comment:
                comments_data.append({
                    "评论时间": comment_time[0],
                    "评论内容": comment[0]
                })
        self.append_to_json(comments_data, f'{self.name}.json')
        time.sleep(1)  # be polite to the server between pages

    @staticmethod
    def segment_and_remove_stopwords(text):
        """Cut *text* with jieba, drop Chinese stop words, and return the
        surviving tokens joined with single spaces."""
        if DoubanMovieAnalyzer._stopwords is None:
            # Load the stop-word list once and cache it on the class.
            with open('cn_stopwords.txt', 'r', encoding='utf-8') as stopword_file:
                DoubanMovieAnalyzer._stopwords = {line.strip() for line in stopword_file}
        seg_list = jieba.cut(text, cut_all=False, HMM=True)
        return ' '.join(word for word in seg_list
                        if word not in DoubanMovieAnalyzer._stopwords)

    @staticmethod
    def remove_empty_elements(comment_list):
        """Return *comment_list* without falsy entries ('' / None / etc.)."""
        return [comment for comment in comment_list if comment]

    @staticmethod
    def append_to_json(data, filename):
        """Append *data* (a list of dicts) to the JSON array in *filename*,
        creating the file on first use."""
        try:
            with open(filename, 'r', encoding='utf-8') as json_file:
                existing_data = json.load(json_file)
            existing_data.extend(data)
            with open(filename, 'w', encoding='utf-8') as json_file:
                json.dump(existing_data, json_file, ensure_ascii=False, indent=2)
        except FileNotFoundError:
            with open(filename, 'w', encoding='utf-8') as json_file:
                json.dump(data, json_file, ensure_ascii=False, indent=2)

    def generate_chart(self):
        """Build the per-month sentiment scatter chart from the crawled
        comments.  The figure is left as matplotlib's current figure so the
        GUI can embed it via ``plt.gcf()``."""
        # Read the crawled comments once (the original re-read the same
        # file a second time further down).
        with open(f'{self.name}.json', 'r', encoding='utf-8') as file:
            data = json.load(file)

        movie_comment_list = [item['评论内容'] for item in data]

        # Tokenised, stop-word-free comments — computed once (the original
        # ran the segmentation pass twice) and printed for inspection.
        processed_comments = self.remove_empty_elements(
            [self.segment_and_remove_stopwords(comment) for comment in movie_comment_list])
        for idx, comment in enumerate(processed_comments, 1):
            print(f"处理后的评论 {idx}: {comment}")

        # Group (sentiment score, comment text) pairs by "YYYY-MM".
        yearly_monthly_sentiments = defaultdict(list)
        for comment in data:
            match = re.search(r"(\d{4})-(\d{2})", comment["评论时间"])
            if match:
                year_month_key = f"{match.group(1)}-{match.group(2)}"
                sentiment_score = SnowNLP(comment["评论内容"]).sentiments
                yearly_monthly_sentiments[year_month_key].append(
                    (sentiment_score, comment["评论内容"]))

        # Colour each point by how often its exact score occurs overall.
        score_frequencies = defaultdict(int)
        for sentiment_comments in yearly_monthly_sentiments.values():
            for score, _ in sentiment_comments:
                score_frequencies[score] += 1

        colors = [score_frequencies[score]
                  for sentiment_comments in yearly_monthly_sentiments.values()
                  for score, _ in sentiment_comments]

        plt.clf()  # drop any previous chart
        plt.figure(figsize=(12, 8))
        # BUG FIX: take each month's OWN slice of the flattened colour list;
        # the original used colors[:len(...)], i.e. always the first slice.
        offset = 0
        for i, sentiment_comments in enumerate(yearly_monthly_sentiments.values()):
            sentiment_scores, _ = zip(*sentiment_comments)
            plt.scatter([i] * len(sentiment_scores), sentiment_scores,
                        c=colors[offset:offset + len(sentiment_scores)],
                        cmap='viridis', alpha=0.19, s=50)
            offset += len(sentiment_scores)

        plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels
        plt.title('每月评论情感得分分布')
        plt.xlabel('月份')
        plt.ylabel('评论情感得分')
        plt.xticks(range(len(yearly_monthly_sentiments)),
                   list(yearly_monthly_sentiments.keys()), rotation=45)
        plt.colorbar(label='评论数量')

    def analyze_movie_comments(self, movie_name, on_complete):
        """Full pipeline: resolve the comment URL for *movie_name*, crawl the
        configured number of pages, then invoke *on_complete* (the GUI
        triggers chart generation from there)."""
        self.name = movie_name
        self.web_url()

        for i in range(1, 3):  # adjust the number of crawled pages here
            self.first_crawl(i)

        on_complete()

def run_analysis(movie_name, on_complete):
    """Worker-thread entry point: build a fresh analyzer, publish it via the
    module-level ``analyzer`` global (the GUI thread later calls its
    ``generate_chart``), and run the full crawl pipeline."""
    global analyzer
    worker = DoubanMovieAnalyzer()
    analyzer = worker
    worker.analyze_movie_comments(movie_name, on_complete)

class Application(tk.Tk):
    """Tk front end: reads a movie title, runs the crawl on a worker thread
    so the GUI stays responsive, then embeds the resulting matplotlib chart
    in the window."""

    def __init__(self):
        super().__init__()

        self.title("豆瓣电影情感分析器")
        self.geometry("800x600")

        self.create_widgets()

    def create_widgets(self):
        """Build the input row (label + entry + button) and the chart slot."""
        self.label = tk.Label(self, text="请输入电影名:")
        self.label.pack()

        self.entry = tk.Entry(self)
        self.entry.pack()

        self.button = tk.Button(self, text="分析", command=self.start_analysis)
        self.button.pack()

        # Placeholder for the embedded matplotlib canvas (created lazily).
        self.canvas = None

    def start_analysis(self):
        """Validate the input and launch the crawl on a background thread."""
        movie_name = self.entry.get()
        if not movie_name.strip():
            messagebox.showinfo("提示", "请输入电影名称！")
            return

        # Run the crawl off the GUI thread so the window stays responsive.
        thread = Thread(target=run_analysis, args=(movie_name, self.on_analysis_complete))
        # BUG FIX: mark the worker as a daemon so closing the window does not
        # leave the process alive waiting for a crawl to finish.
        thread.daemon = True
        thread.start()

    def on_analysis_complete(self):
        """Called from the worker thread; marshal chart generation and
        display back onto the Tk main loop via ``after``."""
        global analyzer
        # NOTE(review): relies on Tk's `after` being callable from a non-main
        # thread — works with CPython's Tcl thread support, confirm on target.
        self.after(0, analyzer.generate_chart)
        self.after(0, self.show_chart)

    def show_chart(self):
        """Embed matplotlib's current figure into the window, replacing any
        previously shown chart."""
        figure = plt.gcf()  # the figure generate_chart() just produced
        if self.canvas:
            self.canvas.get_tk_widget().pack_forget()
        self.canvas = FigureCanvasTkAgg(figure, self)
        widget = self.canvas.get_tk_widget()
        widget.pack(expand=True, fill=tk.BOTH)


if __name__ == "__main__":
    # Launch the GUI event loop.
    Application().mainloop()