import json
import os
import time
import pandas as pd
import jieba
import wordcloud
from collections import Counter
from config import Config

class DataProcessor:
    """Collect Bilibili danmu (bullet-comment) records and provide
    persistence (CSV/JSON), word-cloud generation, and summary analysis.

    Records are held in memory as a flat list of dicts (see
    add_danmu_data for the schema) until one of the save_* methods
    writes them under Config.OUTPUT_DIR.
    """

    def __init__(self):
        # One dict per danmu message; schema documented in add_danmu_data.
        self.danmu_data = []

    def add_danmu_data(self, video_url, bvid, cid, danmus, video_index):
        """Append one record per danmu string for a single video.

        Args:
            video_url: URL of the source video.
            bvid: Bilibili video id.
            cid: id of the danmu stream the messages belong to.
            danmus: iterable of danmu text strings.
            video_index: position of the video in the crawled list.
        """
        # The batch was fetched together, so stamp it once instead of
        # calling time.time() per record inside the loop.
        collected_at = time.time()
        for danmu in danmus:
            self.danmu_data.append({
                'video_url': video_url,
                'bvid': bvid,
                'cid': cid,
                'danmu': danmu,
                'video_index': video_index,
                'timestamp': collected_at,
            })

    def save_progress(self):
        """Write the current in-memory data to a progress CSV.

        No-op when no danmu have been collected yet.
        """
        if self.danmu_data:
            Config.create_output_dir()
            df = pd.DataFrame(self.danmu_data)
            # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
            df.to_csv(f'{Config.OUTPUT_DIR}/progress_top300.csv', index=False, encoding='utf-8-sig')
            print(f"已保存进度，当前收集 {len(self.danmu_data)} 条弹幕")

    def save_results(self):
        """Write the final dataset as both CSV and JSON under Config.OUTPUT_DIR."""
        if not self.danmu_data:
            print("没有数据可保存")
            return

        Config.create_output_dir()

        df = pd.DataFrame(self.danmu_data)
        csv_path = f'{Config.OUTPUT_DIR}/bilibili_top300_danmu.csv'
        # utf-8-sig adds a BOM so Excel opens the Chinese text correctly.
        df.to_csv(csv_path, index=False, encoding='utf-8-sig')

        json_path = f'{Config.OUTPUT_DIR}/bilibili_top300_danmu.json'
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(self.danmu_data, f, ensure_ascii=False, indent=2)

        print(f"数据保存完成！共收集 {len(self.danmu_data)} 条弹幕")
        print(f"CSV文件: {csv_path}")
        print(f"JSON文件: {json_path}")

    def generate_wordcloud(self, img_path="bilibili_top300_wordcloud"):
        """Segment the collected danmu with jieba and render a word-cloud image.

        Args:
            img_path: output file name without extension; the image is
                saved as {Config.OUTPUT_DIR}/{img_path}.jpg.

        Returns:
            True on success, False when there is no usable text or
            rendering failed.
        """
        if not self.danmu_data:
            print("没有弹幕数据，无法生成词云")
            return False

        print("正在生成词云图...")

        try:
            all_danmu = " ".join([item['danmu'] for item in self.danmu_data])

            if not all_danmu.strip():
                print("弹幕文本为空，无法生成词云")
                return False

            print(f"原始弹幕总字符数: {len(all_danmu)}")

            words = jieba.lcut(all_danmu)
            print(f"分词后词数: {len(words)}")

            # Drop single characters and configured stop words.
            filtered_words = [word for word in words if len(word) > 1 and word not in Config.STOP_WORDS]
            text = " ".join(filtered_words)

            print(f"过滤后词数: {len(filtered_words)}")

            if not text.strip():
                print("过滤后文本为空，无法生成词云")
                return False

            word_freq = Counter(filtered_words)
            top_words = word_freq.most_common(8)
            print("前8个高频词汇:")
            for word, freq in top_words:
                print(f"  {word}: {freq}次")

            font_path = self._get_font_path()
            if font_path is None:
                # Without a CJK-capable font, Chinese glyphs render as
                # empty boxes; warn instead of failing silently.
                print("警告: 未找到中文字体，词云中的中文可能无法正常显示")

            wc = wordcloud.WordCloud(
                font_path=font_path,
                width=1920,
                height=1080,
                background_color='white',
                max_words=300,
                collocations=False,  # avoid duplicated bigram phrases
                margin=20,
                relative_scaling=0.3,
                prefer_horizontal=0.8,
                min_font_size=15,
                max_font_size=150,
                colormap='viridis',
                scale=2
            )

            wc.generate(text)

            output_file = f'{Config.OUTPUT_DIR}/{img_path}.jpg'
            wc.to_file(output_file)

            if os.path.exists(output_file):
                file_size = os.path.getsize(output_file) / 1024
                print(f"词云生成完毕: {output_file}")
                print(f"文件大小: {file_size:.1f} KB")
                print(f"图片尺寸: 1920x1080 像素")
                print(f"完整路径: {os.path.abspath(output_file)}")

            return True

        except Exception as e:
            # Best-effort feature: report and keep the crawler alive.
            print(f"生成词云时出错: {e}")
            import traceback
            traceback.print_exc()
            return False

    def _get_font_path(self):
        """Return the first existing CJK-capable font path, or None.

        Candidates cover Linux (Noto), Windows (SimHei) and macOS
        (PingFang) default install locations.
        """
        font_paths = [
            '/usr/share/fonts/opentype/noto/NotoSansCJK-Black.ttc',
            'C:/Windows/Fonts/simhei.ttf',
            '/System/Library/Fonts/PingFang.ttc'
        ]

        for fp in font_paths:
            if os.path.exists(fp):
                return fp
        return None

    def analyze_results(self):
        """Print a summary report: per-video counts, average danmu length,
        and the 50 most frequent danmu strings."""
        if not self.danmu_data:
            print("没有弹幕数据可分析")
            return

        df = pd.DataFrame(self.danmu_data)

        print("\n=== 综合排序前300视频弹幕分析报告 ===")
        print(f"成功获取弹幕的视频数: {df['bvid'].nunique()}")
        print(f"总弹幕数: {len(df)}")

        if df['bvid'].nunique() > 0:
            print(f"平均每个视频弹幕数: {len(df) / df['bvid'].nunique():.2f}")

        df['danmu_length'] = df['danmu'].str.len()
        print(f"平均弹幕长度: {df['danmu_length'].mean():.2f} 字符")

        print("\n前50弹幕:")
        top_danmu = df['danmu'].value_counts().head(50)
        for danmu, count in top_danmu.items():
            print(f"  {danmu}: {count}次")