# -*- coding: utf-8 -*-
"""
作者: 肖麒
联系方式：QQ：1505448035
时间: 2025/9/17 16:19
文件作用：情感分析与词频分析，用于洞察观众情绪
"""
# 中文分词
import jieba
# 词云库
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from collections import Counter
import re
# 添加颜色映射库
import numpy as np
from matplotlib import cm

# 添加情感分析相关库
try:
    from snownlp import SnowNLP
    SNOW_NLP_AVAILABLE = True
except ImportError:
    SNOW_NLP_AVAILABLE = False
    print("警告：未安装snownlp库，将无法进行情感分析")

# 导入Bilibili评论获取类
from api.BilibiliCommentFetcher import BilibiliCommentFetcher

class App:
    """Pipeline that fetches Bilibili comments and analyzes audience mood.

    Runs three stages: optional sentiment scoring (snownlp), word-frequency
    analysis (jieba + Counter), and a word-cloud visualization (wordcloud +
    matplotlib).
    """

    def __init__(self):
        # Project-provided fetcher that downloads comments from Bilibili.
        self.comment_fetcher = BilibiliCommentFetcher()

    def fetch_and_analyze_comments(self):
        """Fetch comments and run the full analysis pipeline.

        Returns:
            list[str]: the raw comment strings that were fetched
            (presumably plain text — confirm against BilibiliCommentFetcher).
        """
        # Fetch the comment list from the project-local API wrapper.
        comments = self.comment_fetcher.get_BilibiliCommentFetcher()
        print(f"共获取到 {len(comments)} 条评论")

        # Sentiment analysis is optional: it requires the snownlp package.
        if SNOW_NLP_AVAILABLE:
            self.sentiment_analysis(comments)
        else:
            print("跳过情感分析（需要安装snownlp库）")

        # Word-frequency analysis, then visualize the result as a word cloud.
        word_freq = self.word_frequency_analysis(comments)
        self.generate_wordcloud(word_freq)

        return comments

    def sentiment_analysis(self, comments):
        """Score each non-empty comment with SnowNLP and print a summary.

        A score > 0.5 counts as positive and < 0.5 as negative. Scores of
        exactly 0.5 are counted as neutral — the original code lumped them
        into "negative", contradicting its own printed legend.

        Args:
            comments (list[str]): raw comment texts; blank entries are skipped.
        """
        sentiments = []
        positive_count = 0
        negative_count = 0
        neutral_count = 0

        for comment in comments:
            if not comment.strip():  # skip empty / whitespace-only comments
                continue
            score = SnowNLP(comment).sentiments
            sentiments.append(score)

            if score > 0.5:
                positive_count += 1
            elif score < 0.5:
                negative_count += 1
            else:
                # Fix: exactly-0.5 scores are neutral, not negative.
                neutral_count += 1

        # Only print a summary when at least one comment was scored,
        # which also guards the division below against ZeroDivisionError.
        if sentiments:
            avg_sentiment = sum(sentiments) / len(sentiments)
            print(f"\n=== 情感分析结果 ===")
            print(f"平均情感得分: {avg_sentiment:.3f} (>0.5为积极, <0.5为消极)")
            print(f"积极评论数量: {positive_count}")
            print(f"消极评论数量: {negative_count}")
            print(f"中性评论数量: {neutral_count}")

    def word_frequency_analysis(self, comments):
        """Tokenize all comments with jieba and count word frequencies.

        Args:
            comments (list[str]): raw comment texts.

        Returns:
            collections.Counter: frequency of each multi-character Chinese word.
        """
        all_text = " ".join(comments)

        # Keep only CJK unified ideographs; punctuation, digits and Latin
        # characters are dropped before tokenization.
        clean_text = re.sub(r'[^\u4e00-\u9fa5]', '', all_text)

        words = jieba.lcut(clean_text)

        # Single-character tokens are mostly particles/noise — drop them.
        words = [word for word in words if len(word) > 1]

        word_freq = Counter(words)

        # Print the 20 most common words for a quick console overview.
        print(f"\n=== 词频分析结果（前20个） ===")
        for word, freq in word_freq.most_common(20):
            print(f"{word}: {freq}")

        return word_freq

    def generate_wordcloud(self, word_freq):
        """Render and display a word cloud from a frequency mapping.

        Args:
            word_freq (collections.Counter): word -> count mapping; a no-op
                (with a console message) when empty.
        """
        if not word_freq:
            print("没有足够的词汇生成词云")
            return

        # Configure matplotlib for Chinese text rendering.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False

        # Shared styling for the word cloud; font_path is added separately so
        # we can retry without it when the font file is missing.
        wc_kwargs = dict(
            width=1200,
            height=800,
            background_color='black',  # black background for a vivid look
            max_words=200,
            max_font_size=120,
            min_font_size=12,
            random_state=42,           # deterministic layout between runs
            colormap=cm.plasma,
            contour_width=2,
            contour_color='steelblue',
            relative_scaling=0.5,
            prefer_horizontal=0.7,     # 70% of words laid out horizontally
        )

        try:
            # "simhei.ttf" is resolved relative to the working directory;
            # it is needed for Chinese glyphs but may not exist on all systems.
            wc = WordCloud(font_path="simhei.ttf", **wc_kwargs)
            wc.generate_from_frequencies(word_freq)
        except OSError:
            # Fix: the hard-coded font path used to crash the whole run when
            # absent. Fall back to wordcloud's bundled default font (Chinese
            # glyphs may render as boxes, but the pipeline completes).
            wc = WordCloud(**wc_kwargs)
            wc.generate_from_frequencies(word_freq)

        # Display the word cloud on a black figure.
        plt.figure(figsize=(15, 10))
        plt.imshow(wc, interpolation='bilinear')
        plt.axis('off')
        plt.title('B站评论词云图', fontsize=24, color='white', pad=20)
        plt.gca().set_facecolor('black')
        plt.gcf().set_facecolor('black')
        plt.tight_layout()
        plt.show()

# Usage example: run the full fetch-and-analysis pipeline as a script.
if __name__ == "__main__":
    App().fetch_and_analyze_comments()
