import json
import logging
from typing import List, Dict, Any, Optional
from dataclasses import asdict

from core.spider import WeiboSpider
from core.models import WeiboPost
from analysis.analyzer import DataAnalyzer
from analysis.visualizer import Visualizer
from config.settings import WeiboConfig
from config.settings import setup_logging
from utils.file_utils import ensure_directory, save_json, save_csv
import os

# Configure the project-wide logging handlers/format once at import time
# (side effect of importing this module; implementation lives in config.settings).
setup_logging()
# Module-level logger used by WeiboCrawlerApp below.
logger = logging.getLogger('WeiboCrawlerApp')

class WeiboCrawlerApp:
    """Weibo crawler application - main controller.

    Orchestrates the full pipeline: crawl posts for each keyword, analyze
    them, render visualizations, and persist both raw data and analysis
    results under an output directory.
    """

    def __init__(self, config: Optional[WeiboConfig] = None):
        """Initialize the app with an optional configuration.

        Args:
            config: Crawler configuration; a default WeiboConfig is
                constructed when None is given.
        """
        self.config = config or WeiboConfig()
        self.spider = WeiboSpider(self.config)
        # Created lazily in run_analysis() once crawled data is available.
        self.analyzer: Optional[DataAnalyzer] = None
        self.visualizer: Optional[Visualizer] = None

    def run_analysis(self, keywords: List[str], pages_per_keyword: int = 5, output_dir: str = 'output'):
        """Run the full crawl -> analyze -> visualize -> save pipeline.

        Args:
            keywords: Keywords to search for.
            pages_per_keyword: Number of result pages to crawl per keyword.
            output_dir: Directory where all output artifacts are written
                (created if missing).
        """
        os.makedirs(output_dir, exist_ok=True)

        all_posts: List[WeiboPost] = []

        # Crawl each keyword and persist its raw posts immediately, so a
        # failure on a later keyword does not lose earlier results.
        for keyword in keywords:
            logger.info(f"正在爬取关键词: {keyword}")
            posts = self.spider.search_keyword(keyword, pages_per_keyword)
            all_posts.extend(posts)

            # Sanitize the keyword before embedding it in a filename: path
            # separators would otherwise create unintended subdirectories.
            safe_keyword = keyword.replace('/', '_').replace('\\', '_')
            self._save_posts(posts, os.path.join(output_dir, f'weibo_{safe_keyword}.json'))

        if not all_posts:
            logger.warning("没有获取到任何数据")
            return

        # Build the analysis/visualization layer over the combined dataset.
        self.analyzer = DataAnalyzer(all_posts)
        self.visualizer = Visualizer(self.analyzer)

        # Log aggregate statistics.
        stats = self.analyzer.get_basic_stats()
        logger.info("数据统计:")
        for key, value in stats.items():
            logger.info(f"  {key}: {value}")

        # Log the top posts by engagement (a DataFrame; may be empty).
        top_posts = self.analyzer.get_top_posts(5)
        if not top_posts.empty:
            logger.info("热门微博TOP 5:")
            for idx, row in top_posts.iterrows():
                logger.info(f"  {row['user_name']}: {row['content'][:50]}... (互动量: {row['total_engagement']})")

        # Render visualization artifacts.
        self.visualizer.plot_engagement_analysis(os.path.join(output_dir, 'engagement_analysis.png'))
        self.visualizer.generate_wordcloud(os.path.join(output_dir, 'wordcloud.png'))

        # Persist the analysis DataFrame and statistics.
        self._save_analysis_results(output_dir)

        logger.info(f"分析完成！结果保存在: {output_dir}")

    def _save_posts(self, posts: List[WeiboPost], filepath: str):
        """Serialize posts to a UTF-8 JSON file at *filepath*.

        Args:
            posts: Posts to save; each must provide a to_dict() method.
            filepath: Destination path; parent directories are created
                as needed.
        """
        data = [post.to_dict() for post in posts]

        # os.path.dirname() returns '' for a bare filename, and
        # os.makedirs('') raises FileNotFoundError -- only create the
        # parent directory when there actually is one.
        parent = os.path.dirname(filepath)
        if parent:
            os.makedirs(parent, exist_ok=True)

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def _save_analysis_results(self, output_dir: str):
        """Write the analysis DataFrame (CSV) and basic stats (JSON).

        No-op when run_analysis() has not yet built an analyzer.

        Args:
            output_dir: Existing directory to write the result files into.
        """
        if self.analyzer is None:
            return

        # utf-8-sig prepends a BOM so Excel opens the CSV with the
        # correct encoding for CJK text.
        csv_path = os.path.join(output_dir, 'weibo_analysis.csv')
        self.analyzer.df.to_csv(csv_path, index=False, encoding='utf-8-sig')

        stats_path = os.path.join(output_dir, 'statistics.json')
        with open(stats_path, 'w', encoding='utf-8') as f:
            json.dump(self.analyzer.get_basic_stats(), f, ensure_ascii=False, indent=2)