import random
import re
from collections import Counter
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from .base_agent import AgentResult, BaseAgent

class TrendAnalysisAgent(BaseAgent):
    """Trend-analysis agent.

    Builds dashboard-ready trend data from the upstream agents' results:
    a keyword word cloud, today's hotspot statistics, a (simulated)
    user-coverage trend, and per-platform 24h heat trends.
    """

    def __init__(self, config_path: Optional[str] = None):
        super().__init__()
        # Optional path to an external configuration file; stored but not
        # read anywhere in this class.
        self.config_path = config_path
        self.name = "TrendAnalysisAgent"
        self.version = "1.0.0"

    def get_name(self) -> str:
        """Return the agent name used for registration/lookup."""
        return self.name

    def get_version(self) -> str:
        """Return the agent's version string."""
        return self.version

    def get_dependencies(self) -> List[str]:
        """Return the names of the agents whose results this agent consumes."""
        return ["DataCollectionAgent", "SummaryGenerationAgent"]

    def execute(self, input_data: Dict[str, Any]) -> AgentResult:
        """Run the trend analysis.

        Args:
            input_data: Shared pipeline context. Upstream results are
                expected under ``"SummaryGenerationAgent_result"`` and
                ``"DataCollectionAgent_result"``.

        Returns:
            AgentResult: success carrying the generated trend data, or a
            failure result when both inputs are missing or an exception
            occurs (errors are reported, never raised).
        """
        self.log_execution_start()
        start_time = datetime.now()

        try:
            # Pull the upstream agents' results out of the shared context.
            summary_data = input_data.get("SummaryGenerationAgent_result", {})
            data_collection_result = input_data.get("DataCollectionAgent_result", {})

            # At least one upstream result must be present.
            if not summary_data and not data_collection_result:
                execution_time = (datetime.now() - start_time).total_seconds()
                self.log_execution_end(False, execution_time)
                return AgentResult(
                    success=False,
                    data={},
                    error="缺少数据输入"
                )

            # Build all trend artifacts.
            trend_data = self._generate_trend_analysis(summary_data, data_collection_result)

            execution_time = (datetime.now() - start_time).total_seconds()
            self.log_execution_end(True, execution_time)

            return AgentResult(
                success=True,
                data=trend_data,
                metadata={
                    "execution_time": execution_time,
                    "timestamp": datetime.now().isoformat(),
                    "data_sources": ["SummaryGenerationAgent", "DataCollectionAgent"]
                }
            )

        except Exception as e:
            # Pipeline boundary: convert any failure into a failed
            # AgentResult so the surrounding pipeline keeps running.
            execution_time = (datetime.now() - start_time).total_seconds()
            self.log_execution_end(False, execution_time)
            return AgentResult(
                success=False,
                data={},
                error=f"趋势分析失败: {str(e)}"
            )

    def _generate_trend_analysis(self, summary_data: Dict[str, Any], data_collection_result: Dict[str, Any]) -> Dict[str, Any]:
        """Assemble the full trend-analysis payload."""

        # 1. Word-cloud keyword data.
        word_cloud_data = self._generate_word_cloud(summary_data, data_collection_result)

        # 2. Today's hotspot statistics.
        today_stats = self._generate_today_stats(summary_data, data_collection_result)

        # 3. User-coverage trend (simulated).
        user_coverage_trend = self._generate_user_coverage_trend()

        # 4. Per-platform heat trend (simulated).
        platform_trends = self._generate_platform_trends(summary_data, data_collection_result)

        return {
            "word_cloud": word_cloud_data,
            "today_stats": today_stats,
            "user_coverage_trend": user_coverage_trend,
            "platform_trends": platform_trends,
            "last_updated": datetime.now().isoformat()
        }

    @staticmethod
    def _collect_item_text(source: Dict[str, Any]) -> str:
        """Concatenate every item's title and description from an upstream
        result shaped like ``{platform: {"data": [item, ...]}}``.

        Entries that are not dicts, or lack a "data" list, are skipped.
        """
        parts: List[str] = []
        if not source:
            return ""
        for platform_data in source.values():
            if isinstance(platform_data, dict) and "data" in platform_data:
                for item in platform_data["data"]:
                    if isinstance(item, dict):
                        title = item.get("title", "")
                        if title:
                            parts.append(title)
                        desc = item.get("desc", "")
                        if desc:
                            parts.append(desc)
        return " ".join(parts)

    def _generate_word_cloud(self, summary_data: Dict[str, Any], data_collection_result: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Build word-cloud entries (``{"name", "value"}``) from the actual
        hotspot text only, avoiding hard-coded unrelated topic words."""

        # Words excluded from the cloud (particles, pronouns, other noise).
        excluded_words = {
            "的", "了", "是", "在", "我", "有", "和", "就", "不", "人", "都", "一", "一个", "上", "也", "很", "到", "说", "要", "去", "你", "会", "着", "没有", "看", "好", "自己", "这"
        }

        # Gather all titles/descriptions: raw collected data first
        # (preferred), then the summarized data.
        all_text = " ".join(
            part for part in (
                self._collect_item_text(data_collection_result),
                self._collect_item_text(summary_data),
            ) if part
        )

        # Extract de-duplicated candidate keywords.
        extracted_keywords = self._extract_keywords(all_text)

        # BUGFIX: _extract_keywords returns de-duplicated keywords, so
        # Counter(extracted_keywords) made every frequency 1 and the
        # "frequency weight" constant. Count real occurrences in the
        # collected text instead (max(1, ...) guards the degenerate case).
        keyword_counter = Counter(
            {kw: max(1, all_text.count(kw)) for kw in extracted_keywords}
        )

        word_cloud_data: List[Dict[str, Any]] = []
        existing_names = set()

        # Highest frequency, used to normalize the frequency weight.
        max_freq = max(keyword_counter.values()) if keyword_counter else 1

        # Keywords in descending frequency order.
        sorted_keywords = sorted(keyword_counter.items(), key=lambda x: x[1], reverse=True)

        for i, (keyword, freq) in enumerate(sorted_keywords):
            # Keep only valid keywords: not seen, not excluded, 2-6 chars.
            if (keyword not in existing_names and
                keyword not in excluded_words and
                2 <= len(keyword) <= 6):

                # Composite weight from frequency, rank, and word length.
                # 1. Frequency weight (60%).
                freq_weight = (freq / max_freq) * 60

                # 2. Rank weight (20%) — earlier rank scores higher.
                position_weight = (1 - (i / len(sorted_keywords))) * 20

                # 3. Length weight (20%) — longer words tend to be more
                #    specific (e.g. proper nouns).
                if len(keyword) >= 4:
                    length_weight = 20
                elif len(keyword) == 3:
                    length_weight = 15
                else:
                    length_weight = 10

                # Clamp the total into [20, 100] so differences stay visible.
                total_weight = int(freq_weight + position_weight + length_weight)
                total_weight = max(20, min(100, total_weight))

                word_cloud_data.append({
                    "name": keyword,
                    "value": total_weight
                })
                existing_names.add(keyword)

                if len(word_cloud_data) >= 30:  # hard cap on candidates
                    break

        # Too few keywords: pad with a small set of generic hotspot terms
        # as a last resort (never hard-coded topic words).
        if len(word_cloud_data) < 5:
            common_keywords = ["热点", "新闻", "话题", "关注", "最新"]
            for keyword in common_keywords:
                if keyword not in existing_names:
                    word_cloud_data.append({
                        "name": keyword,
                        "value": 10
                    })
                    existing_names.add(keyword)
                    if len(word_cloud_data) >= 5:
                        break

        # Highest-weighted words first.
        word_cloud_data.sort(key=lambda x: x["value"], reverse=True)

        # Cap the result for quality and display density.
        return word_cloud_data[:15]

    def _get_extended_stop_words(self):
        """Return an extended stop-word set for last-resort filtering.

        Stricter than the regular stop-word list; not called within this
        class (kept for external/fallback use — TODO confirm callers).
        """
        return {
            "的", "了", "在", "是", "我", "有", "和", "就", "不", "人", "都", "一", "一个",
            "上", "也", "很", "到", "说", "要", "去", "你", "会", "着", "没有", "看", "好",
            "自己", "这", "那", "什么", "怎么", "为什么", "因为", "所以", "但是", "然后",
            # Problem words explicitly reported by users.
            "由于", "信息", "有限", "由于信息有限", "近日", "报道", "据报道", "据悉", "发现", "表示", "提到"
        }

    def _extract_keywords(self, text: str) -> List[str]:
        """Extract candidate keywords from real text (no hard-coded words).

        Returns up to 30 unique 2-6 character Chinese words, ordered by
        frequency, with stop words and substring-redundant entries removed.
        """
        if not text:
            return []

        # Replace everything except word chars, whitespace and CJK
        # ideographs with spaces.
        text = re.sub(r'[^\w\s\u4e00-\u9fa5]', ' ', text)

        # Common Chinese stop words (function words, pronouns, particles).
        stop_words = {
            "的", "了", "是", "在", "我", "有", "和", "就", "不", "人", "都", "一", "一个", "上", "也", "很", "到", "说", "要", "去",
            "你", "会", "着", "没有", "看", "好", "自己", "这", "个", "被", "为", "与", "及", "对于", "关于", "通过",
            "随着", "按照", "经过", "由于", "当", "从", "对", "把", "将", "给", "由", "比", "让", "其", "那", "这些", "那些"
        }

        # Candidate keywords: contiguous runs of 2-6 CJK characters.
        potential_keywords = re.findall(r'[\u4e00-\u9fa5]{2,6}', text)

        keyword_counter = Counter(potential_keywords)

        # Drop stop words; keep at most 30, most frequent first.
        filtered_keywords = []
        for keyword, count in keyword_counter.most_common():
            if keyword not in stop_words and 2 <= len(keyword) <= 6:
                filtered_keywords.append(keyword)
                if len(filtered_keywords) >= 30:
                    break

        # Remove substring redundancy, preferring the longer (more
        # specific) form — e.g. keep "北京大学" over "北京".
        # BUGFIX: the old code removed only the FIRST contained keyword
        # (and mutated the list mid-iteration); evict ALL subsumed entries.
        final_keywords: List[str] = []
        for keyword in filtered_keywords:
            # Skip a keyword that is already subsumed by a kept entry.
            if any(keyword in kept for kept in final_keywords):
                continue
            # Evict every kept entry that the new keyword subsumes.
            final_keywords = [kept for kept in final_keywords if kept not in keyword]
            final_keywords.append(keyword)

            # Cap the result at 30 keywords.
            if len(final_keywords) >= 30:
                break

        return final_keywords

    def _generate_today_stats(self, summary_data: Dict[str, Any], data_collection_result: Dict[str, Any]) -> Dict[str, Any]:
        """Compute today's hotspot counts per platform.

        Falls back to simulated numbers when no real data is available so
        the dashboard still renders.
        """
        total_hotspots = 0
        platform_counts: Dict[str, int] = {}

        # Count hotspots per platform from the summarized data.
        if summary_data:
            for platform_key, platform_data in summary_data.items():
                if isinstance(platform_data, dict) and "data" in platform_data:
                    count = len(platform_data["data"])
                    total_hotspots += count
                    platform_counts[platform_key] = count

        # No usable data: fabricate plausible defaults.
        if total_hotspots == 0:
            total_hotspots = random.randint(15, 35)
            platform_counts = {
                "weibo": random.randint(5, 12),
                "toutiao": random.randint(4, 10),
                "douyin": random.randint(6, 13)
            }

        return {
            "today_hotspots": str(total_hotspots),
            "platform_breakdown": platform_counts,
            "growth_rate": f"+{random.randint(5, 25)}%",  # simulated growth rate
            "peak_hour": f"{random.randint(19, 22)}:00"  # simulated peak hour
        }

    def _generate_user_coverage_trend(self) -> Dict[str, Any]:
        """Generate a simulated 7-day user-coverage trend."""
        # Baseline coverage with random daily variation.
        base_coverage = random.randint(800000, 1200000)
        trend_data = []

        for i in range(7):
            date = (datetime.now() - timedelta(days=6 - i)).strftime("%m-%d")
            # Simulated day-to-day fluctuation.
            variation = random.randint(-50000, 80000)
            coverage = base_coverage + variation
            trend_data.append({
                "date": date,
                "coverage": coverage,
                "growth": variation
            })

        current_coverage = trend_data[-1]["coverage"]

        return {
            "current_coverage": f"{current_coverage:,}",
            "trend_data": trend_data,
            # Direction is taken from the most recent day's growth only.
            "trend_direction": "up" if trend_data[-1]["growth"] > 0 else "down",
            "weekly_growth": f"+{random.randint(2, 15)}%"
        }

    def _generate_platform_trends(self, summary_data: Dict[str, Any], data_collection_result: Dict[str, Any]) -> Dict[str, Any]:
        """Generate simulated 24h heat trends per platform."""
        platforms = ["weibo", "toutiao", "douyin"]
        # Display names, hoisted out of the loop (was rebuilt per platform).
        display_names = {"weibo": "微博", "toutiao": "头条", "douyin": "抖音"}
        platform_trends = {}

        for platform in platforms:
            # One data point per hour over the past 24 hours.
            base_heat = random.randint(60, 90)
            hourly_data = []

            for hour in range(24):
                # Model the daily activity curve (evenings most active).
                time_factor = 1.0
                if 19 <= hour <= 22:  # evening peak
                    time_factor = 1.3
                elif 12 <= hour <= 14:  # small lunchtime peak
                    time_factor = 1.1
                elif 0 <= hour <= 6:  # overnight trough
                    time_factor = 0.7

                heat = int(base_heat * time_factor + random.randint(-10, 10))
                heat = max(0, min(100, heat))  # clamp to 0-100

                hourly_data.append({
                    "hour": f"{hour:02d}:00",
                    "heat": heat
                })

            platform_trends[platform] = {
                "name": display_names[platform],
                "current_heat": hourly_data[-1]["heat"],
                "trend_data": hourly_data,
                "peak_hour": max(hourly_data, key=lambda x: x["heat"])["hour"]
            }

        return platform_trends
