import os
import re
import pandas as pd
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from collections import Counter

# Matplotlib defaults: CJK-capable font fallbacks, correct rendering of the
# minus sign alongside CJK fonts, and high-resolution figure output.
plt.rcParams.update({
    'font.sans-serif': ['SimHei', 'WenQuanYi Micro Hei', 'Heiti TC'],
    'axes.unicode_minus': False,
    'figure.dpi': 300,
})


class BiliBiliWordCloudAnalyzer:
    """Generate word clouds from a Bilibili weekly-ranking CSV export.

    Workflow: locate ``bilibili_weekly_data.csv`` under the working
    directory, load it with pandas, segment the Chinese text columns with
    jieba, filter stopwords, then render and save one word-cloud PNG per
    text column ('视频标题', '简介', '推荐理由').
    """

    def __init__(self):
        # Stopwords removed after segmentation; extend as needed.
        # FIX: several items in the original literal were missing a trailing
        # comma, so Python's implicit string concatenation silently fused
        # neighbours (e.g. '啥' + '这个' -> '啥这个', '好' + '他' -> '好他'),
        # meaning neither of the intended words was ever filtered.
        self.stopwords = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人',
            '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去',
            '你', '会', '着', '没有', '看', '这', '那', '吗', '么', '啥',
            '这个', '那个', '啊', '吧', '呢', '呀', '啦', '哈', '啊啊啊',
            '与', '视频', '什么', '能', '我们', '终于', '居然', '竟然',
            '她', '但', '但是', '这些', '怎么', '哈哈哈', '又', '把',
            '乐乐', '已经', '这样', '好', '他', '当', '这是', '被', '给',
            '还', '主', '它', '这么', '还是', '到底', '燥燥', '请', '真',
            '就是',
        }

        # Bilibili-specific vocabulary registered with jieba so these
        # phrases survive segmentation as single tokens.
        # FIX: the same missing-comma bug fused entries here too (e.g.
        # '一键三连' + '笑死' -> '一键三连笑死', '宅舞' + '教程' -> '宅舞教程'),
        # so the intended words were never registered; duplicates dropped.
        self.custom_words = [
            '弹幕', '安利', '硬核', '真香', '翻车', '涨知识', '绝了', 'yyds',
            '白嫖', '三连', '一键三连', '笑死', '泪目', '神仙', '不愧是',
            '竟然', '没想到', '简直', '震撼', '小众', '宝藏', '宅舞',
            '教程', '合集', '系列', '第一期', '第二期', '下期', '更新',
            '来了', '联动', '求关注', '求三联', 'UP主', '阿婆主', '粉丝',
            '关注', '点赞', '投币', '评论', '转发', '收藏', '观众老爷',
            '观众姥爷', '播放量', '弹幕量', '推荐', '必看', '年度', '盘点',
            '高能', '全程', '不要白嫖', '博主', '治愈', '解压', '科普',
            '干货', '生活', '日常', '美食', '游戏', '动画', 'up', 'UP',
            'up主', '动漫', '影视', '音乐', '舞蹈', '搞笑', '美妆', '旅行',
            '科技', '数码', '血压',
        ]

        # Teach jieba the custom vocabulary up front.
        for word in self.custom_words:
            jieba.add_word(word)

    def find_csv_file(self, filename="bilibili_weekly_data.csv"):
        """Search the working directory tree for *filename*.

        Returns the path of the first case-insensitive match, or None if
        no file with that name exists under the current directory.
        """
        for root, dirs, files in os.walk('.'):
            for file in files:
                if file.lower() == filename.lower():
                    return os.path.join(root, file)
        return None

    def load_data(self, file_path):
        """Load the CSV at *file_path*; return a DataFrame, or None on failure."""
        try:
            df = pd.read_csv(file_path)
            print(f"✅ 成功加载数据: {len(df)} 条记录")
            return df
        except Exception as e:
            # Best-effort loader: report the problem and let the caller bail out.
            print(f"❌ 加载数据失败: {str(e)}")
            return None

    def preprocess_text(self, text):
        """Clean *text* and return space-joined, stopword-filtered tokens.

        Non-string input (e.g. NaN cells from pandas) yields an empty string.
        """
        if not isinstance(text, str):
            return ""

        # Strip URLs, punctuation and digits before segmentation.
        text = re.sub(r'http\S+|www\S+|https\S+', '', text, flags=re.MULTILINE)
        text = re.sub(r'[^\w\s]', '', text)
        text = re.sub(r'\d+', '', text)

        # Segment with jieba and drop empty tokens and stopwords.
        words = jieba.cut(text.strip())
        return " ".join(word for word in words if word and word not in self.stopwords)

    def generate_wordcloud(self, text, title, output_path=None):
        """Render a word cloud for *text* and optionally save it as a PNG.

        High-frequency words are emphasised via a large ``max_font_size``
        and a high ``relative_scaling``. Does nothing if *text* is blank.
        """
        if not text.strip():
            print(f"⚠️ 文本为空，无法生成{title}词云图")
            return

        wordcloud = WordCloud(
            width=1200,
            height=800,
            background_color='white',
            colormap='viridis',
            font_path=self._get_font_path(),
            max_words=150,  # fewer words -> less clutter; frequent ones stand out
            max_font_size=200,  # large ceiling emphasises top-frequency words
            min_font_size=10,  # floor keeps rare words legible
            relative_scaling=0.9,  # 0-1: how strongly frequency drives font size
            random_state=42,  # deterministic layout across runs
            prefer_horizontal=0.9
        ).generate(text)

        # Compose the figure (used only for the title/layout; saving goes
        # through WordCloud.to_file below).
        plt.figure(figsize=(10, 8))
        plt.imshow(wordcloud, interpolation="bilinear")
        plt.axis("off")
        plt.title(title, fontsize=15, pad=20)

        if output_path:
            wordcloud.to_file(output_path)
            print(f"✅ {title}词云图已保存至: {output_path}")

        plt.tight_layout()
        # Headless usage: nothing is shown interactively.  FIX: close the
        # figure so repeated calls do not accumulate open matplotlib figures.
        plt.close()

    def analyze_top_words(self, text, top_n=10):
        """Return the *top_n* most frequent (word, count) pairs in *text*.

        *text* is expected to be a whitespace-separated token string as
        produced by :meth:`preprocess_text`.
        """
        words = text.split()
        if not words:
            return []

        word_counts = Counter(words)
        return word_counts.most_common(top_n)

    def _get_font_path(self):
        """Return the first available CJK font path for this OS, or None."""
        # Default Windows font locations.
        windows_fonts = [
            r'C:\Windows\Fonts\simhei.ttf',  # SimHei
            r'C:\Windows\Fonts\simsun.ttc',  # SimSun
            r'C:\Windows\Fonts\msyh.ttc',  # Microsoft YaHei
        ]

        # Default Linux font locations.
        linux_fonts = [
            '/usr/share/fonts/truetype/wqy/wqy-microhei.ttc',  # WenQuanYi Micro Hei
            '/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc',  # WenQuanYi Zen Hei
            '/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc',  # Noto Sans CJK
        ]

        # Default macOS font locations.
        mac_fonts = [
            '/System/Library/Fonts/PingFang.ttc',  # PingFang
            '/System/Library/Fonts/STHeiti Light.ttc',  # Heiti
        ]

        if os.name == 'nt':  # Windows
            for font in windows_fonts:
                if os.path.exists(font):
                    return font
        elif os.name == 'posix':  # Linux/macOS
            if os.uname().sysname == 'Darwin':  # macOS
                for font in mac_fonts:
                    if os.path.exists(font):
                        return font
            else:  # Linux
                for font in linux_fonts:
                    if os.path.exists(font):
                        return font

        # No CJK font found; WordCloud will fall back to its default font,
        # which will likely render Chinese glyphs as boxes.
        print("⚠️ 未找到中文字体，词云可能无法正确显示中文")
        return None

    def run(self):
        """Run the full pipeline: find CSV, load, analyze, render word clouds."""
        print("🔍 正在查找CSV文件...")
        csv_path = self.find_csv_file()

        if not csv_path:
            print("❌ 未找到CSV文件，请确保'bilibili_weekly_data.csv'在当前目录或子目录中")
            return

        print(f"✅ 找到文件: {csv_path}")

        df = self.load_data(csv_path)
        if df is None:
            return

        # Validate the expected text columns before processing.
        required_columns = ['视频标题', '简介', '推荐理由']
        missing_columns = [col for col in required_columns if col not in df.columns]

        if missing_columns:
            print(f"❌ CSV文件缺少必要的列: {', '.join(missing_columns)}")
            return

        # One word cloud per text column.
        for column in required_columns:
            print(f"\n📊 正在分析 '{column}'...")

            # Clean each cell, then concatenate the whole column into one text.
            processed_text = df[column].apply(self.preprocess_text).str.cat(sep=' ')

            # Console summary of the top terms, for cross-checking the cloud.
            top_words = self.analyze_top_words(processed_text, top_n=10)
            if top_words:
                print(f"🔥 高频词汇: {', '.join([f'{word}({count})' for word, count in top_words])}")

            output_file = f"{column}_词云图.png"
            self.generate_wordcloud(processed_text, f"{column}词云图", output_file)

        print("\n🎉 分析完成！已生成三个词云图")


if __name__ == "__main__":
    analyzer = BiliBiliWordCloudAnalyzer()
    analyzer.run()