from typing import List, Dict, Any, Optional, Tuple, Set
from dataclasses import dataclass
from collections import defaultdict
import asyncio
import numpy as np
from scipy.spatial.distance import cosine
from utils.logger import Logger
from utils.constants import TaskPriority, TASK_STATUSES
from app.core.db_utils import DatabaseUtils
from middleware.redis_adapter import get_redis_adapter
from apps.orchestrator.async_orchestrator_agent import AsyncOrchestratorAgent
from middleware.model_client import ModelClientFactory, ModelBackend
from config.model_config import model_config_manager

# Module-level logger tagged with this distributor's agent name.
logger = Logger.get_logger(agent_name="smart_task_distributor")

@dataclass
class AgentProfile:
    """Profile of a worker agent used for task-matching decisions."""
    agent_id: str
    agent_type: str
    capabilities: List[str]
    performance_metrics: Dict[str, float]  # e.g. success rate, average response time
    current_load: int  # number of tasks currently assigned to this agent
    supported_model_backends: Optional[List[str]] = None  # model backends this agent supports
    embedding: Optional[List[float]] = None  # embedding vector of the agent's capabilities

@dataclass
class TaskEmbeddingInfo:
    """Embedding-derived information for a single task."""
    task_id: str
    query: str
    embedding: List[float]
    complexity_score: float  # task complexity score in [0, 1]
    task_type: str

class SmartTaskDistributor:
    """Smart task distributor based on semantic embeddings and Shapley values.

    Scores candidate agents for each task by combining:
      * semantic similarity between task and agent-capability embeddings,
      * recorded agent performance metrics,
      * current agent load,
      * model-backend compatibility,
    then allocates the task to the top-scoring agent(s).
    """

    # Dimensionality of the zero-vector returned when embedding generation
    # fails. NOTE(review): assumes the embedding model produces 1536-dim
    # vectors — confirm against the configured backend.
    DEFAULT_EMBEDDING_DIM = 1536

    # Baseline metrics used when an agent has no recorded performance data.
    _DEFAULT_METRICS = {
        'success_rate': 0.5,
        'avg_response_time': 1.0,
        'throughput': 10.0,
        'resource_efficiency': 0.7
    }

    def __init__(self, preferred_backend: Optional[str] = None):
        """Create a distributor.

        Args:
            preferred_backend: Model backend to prefer for embedding
                generation; defaults to the globally configured backend.
        """
        self.redis_client = get_redis_adapter()
        self.agent_profiles: Dict[str, AgentProfile] = {}
        self.task_embeddings: Dict[str, TaskEmbeddingInfo] = {}
        self.async_orchestrator = AsyncOrchestratorAgent()
        self.embedding_cache_ttl = 3600  # TTL (seconds) for cached embeddings
        self.shapley_computation_batch_size = 10  # batch size for Shapley computation

        # Model backend configuration
        self.model_config = model_config_manager.get_config()
        self.preferred_backend = preferred_backend or self.model_config.backend.value
        self.model_client_factory = ModelClientFactory()
        self.model_clients: Dict[str, Any] = {}

        logger.info(f"SmartTaskDistributor initialized successfully with preferred backend: {self.preferred_backend}")

    async def initialize(self):
        """Initialize the distributor: model clients, agent profiles, embeddings."""
        await self._initialize_model_clients()
        await self._load_agent_profiles()
        await self._compute_agent_embeddings()

    async def _initialize_model_clients(self):
        """Instantiate a model client for every supported backend.

        Backends that fail to initialize are skipped with a warning. If the
        preferred backend is not among the successfully initialized clients,
        the first available backend is promoted to preferred.
        """
        try:
            # The config object is accessed via attributes elsewhere in this
            # class (model_config.backend), so read supported_backends as an
            # attribute rather than with a dict-style .get(), which would
            # raise AttributeError on a non-dict config object.
            supported_backends = getattr(self.model_config, 'supported_backends', None) or ['vllm']

            for backend in supported_backends:
                try:
                    # Create the model client for this backend
                    client = self.model_client_factory.create_client(backend)
                    self.model_clients[backend] = client
                    logger.info(f"Initialized model client for backend: {backend}")
                except Exception as e:
                    logger.warning(f"Failed to initialize model client for backend {backend}: {str(e)}")

            # Fall back to any available backend when the preferred one is
            # missing. Capture the old name first so the warning reports both
            # the unavailable backend and the chosen fallback (the original
            # code logged the fallback name twice).
            if self.preferred_backend not in self.model_clients and self.model_clients:
                unavailable_backend = self.preferred_backend
                self.preferred_backend = next(iter(self.model_clients.keys()))
                logger.warning(f"Preferred backend {unavailable_backend} not available, using fallback: {self.preferred_backend}")

        except Exception as e:
            logger.error(f"Error initializing model clients: {str(e)}", exc_info=True)

    async def _load_agent_profiles(self):
        """Load profiles and performance data for all healthy agents."""
        try:
            # Ask the orchestrator for all currently healthy agent instances
            healthy_agents = await self.async_orchestrator._get_healthy_agents()

            for agent_id, agent_info in healthy_agents.items():
                # Fetch the agent's recorded performance metrics
                performance_metrics = await self._get_agent_performance(agent_id)

                # Build the agent profile
                profile = AgentProfile(
                    agent_id=agent_id,
                    agent_type=agent_info.get('agent_type', 'unknown'),
                    capabilities=agent_info.get('capabilities', []),
                    performance_metrics=performance_metrics,
                    current_load=agent_info.get('task_count', 0),
                    supported_model_backends=agent_info.get('supported_model_backends', ['vllm', 'ollama'])  # default: vllm and ollama
                )
                self.agent_profiles[agent_id] = profile

            logger.info(f"Loaded {len(self.agent_profiles)} agent profiles")
        except Exception as e:
            logger.error(f"Failed to load agent profiles: {str(e)}", exc_info=True)

    async def _get_agent_performance(self, agent_id: str) -> Dict[str, float]:
        """Fetch an agent's performance metrics from Redis.

        Returns a fresh dict seeded with baseline defaults; metrics present
        in Redis override their defaults. On error the plain defaults are
        returned. A new dict is built each call because callers mutate it.
        """
        metrics_out = dict(self._DEFAULT_METRICS)
        try:
            performance_key = f"agent_performance:{agent_id}"
            metrics = self.redis_client.hgetall(performance_key)

            if metrics:
                for k, v in metrics.items():
                    # Redis adapters may hand back bytes or already-decoded str
                    key = k.decode('utf-8') if isinstance(k, bytes) else k
                    if key in metrics_out:
                        raw = v.decode('utf-8') if isinstance(v, bytes) else v
                        metrics_out[key] = float(raw)

            return metrics_out
        except Exception as e:
            logger.error(f"Failed to get agent performance for {agent_id}: {str(e)}")
            return dict(self._DEFAULT_METRICS)

    async def _generate_embedding(self, text: str, backend: Optional[str] = None) -> List[float]:
        """Generate an embedding for *text*, optionally via a specific backend.

        Falls back to DatabaseUtils when no client is available for the
        target backend, and to a zero vector on any error.
        """
        try:
            # Choose which backend to use
            target_backend = backend or self.preferred_backend

            if target_backend in self.model_clients:
                # Use the dedicated model client for this backend
                client = self.model_clients[target_backend]
                embedding = await client.generate_embedding(text)
                return embedding
            else:
                # Fall back to the shared DatabaseUtils embedding helper
                logger.warning(f"Model client for backend {target_backend} not available, falling back to DatabaseUtils")
                return await DatabaseUtils.generate_embedding(text)
        except Exception as e:
            logger.error(f"Error generating embedding: {str(e)}")
            # Last-resort fallback: a zero vector of the assumed dimension
            return [0.0] * self.DEFAULT_EMBEDDING_DIM

    async def _compute_agent_embeddings(self):
        """Compute and cache a capability embedding for every loaded agent."""
        for agent_id, profile in self.agent_profiles.items():
            try:
                # Join the capability descriptions into a single text
                capability_text = " ".join(profile.capabilities)

                # Try each backend the agent supports until one succeeds
                embedding = None
                for backend in profile.supported_model_backends or [self.preferred_backend]:
                    try:
                        embedding = await self._generate_embedding(capability_text, backend)
                        if embedding:
                            break
                    except Exception:
                        continue

                # If every supported backend failed, use the default backend
                if not embedding:
                    embedding = await self._generate_embedding(capability_text)

                profile.embedding = embedding

                # Cache the embedding in Redis as a "|"-separated string
                embedding_key = f"agent_embedding:{agent_id}"
                self.redis_client.setex(
                    embedding_key,
                    self.embedding_cache_ttl,
                    "|".join(map(str, embedding))
                )
            except Exception as e:
                logger.error(f"Failed to compute embedding for agent {agent_id}: {str(e)}")

    async def compute_task_embedding(self, task: Dict[str, Any]) -> TaskEmbeddingInfo:
        """Compute (or load from cache) a task's embedding and complexity score."""
        task_id = task['task_id']
        query = task['query']

        # Check the Redis cache first
        cache_key = f"task_embedding:{task_id}"
        cached = self.redis_client.get(cache_key)

        if cached:
            try:
                # Cached format: "<e1|e2|...>|||<complexity>|||<task_type>"
                cached_text = cached.decode('utf-8') if isinstance(cached, bytes) else cached
                parts = cached_text.split('|||')
                if len(parts) >= 3:
                    embedding = list(map(float, parts[0].split('|')))
                    complexity_score = float(parts[1])
                    return TaskEmbeddingInfo(
                        task_id=task_id,
                        query=query,
                        embedding=embedding,
                        complexity_score=complexity_score,
                        task_type=task.get('task_type', 'unknown')
                    )
            except Exception as e:
                logger.warning(f"Failed to parse cached embedding for task {task_id}: {str(e)}")

        # Cache miss (or unparsable cache): generate a fresh embedding
        try:
            # Tasks may pin a specific model backend
            backend = task.get('model_backend')
            embedding = await self._generate_embedding(query, backend)

            # Score complexity from the query text and embedding statistics
            complexity_score = self._compute_complexity_score(query, embedding)

            embedding_info = TaskEmbeddingInfo(
                task_id=task_id,
                query=query,
                embedding=embedding,
                complexity_score=complexity_score,
                task_type=task.get('task_type', 'unknown')
            )

            # Cache the result using the same "|||"-separated format
            cache_value = f"{'|'.join(map(str, embedding))}|||{complexity_score}|||{task.get('task_type', 'unknown')}"
            self.redis_client.setex(cache_key, self.embedding_cache_ttl, cache_value)

            self.task_embeddings[task_id] = embedding_info
            return embedding_info
        except Exception as e:
            logger.error(f"Failed to compute embedding for task {task_id}: {str(e)}")
            # Degrade gracefully: zero embedding, medium complexity
            return TaskEmbeddingInfo(
                task_id=task_id,
                query=query,
                embedding=[0.0] * self.DEFAULT_EMBEDDING_DIM,
                complexity_score=0.5,
                task_type=task.get('task_type', 'unknown')
            )

    def _compute_complexity_score(self, query: str, embedding: List[float]) -> float:
        """Score task complexity in [0, 1] from text length, embedding
        variance, and the presence of complexity-indicating keywords."""
        # Text-length factor (normalized to 0-0.3)
        length_factor = min(len(query) / 500, 0.3)

        # Embedding-variance factor (normalized to 0-0.3; the x100 scale
        # assumes variance roughly in the 0-0.003 range)
        variance = np.var(embedding)
        variance_factor = min(variance * 100, 0.3)

        # Keyword factor (normalized to 0-0.4); the keywords are the Chinese
        # terms for "analyze", "compare", "integrate", "reason", "predict",
        # "multi-step", "cross-modal", "complex"
        complex_keywords = ['分析', '比较', '整合', '推理', '预测', '多步骤', '跨模态', '复杂']
        keyword_count = sum(1 for keyword in complex_keywords if keyword in query)
        keyword_factor = min(keyword_count / 5, 0.4)

        # Total complexity score, capped at 1.0
        complexity_score = length_factor + variance_factor + keyword_factor
        return min(complexity_score, 1.0)

    def calculate_similarity_score(self, task_embedding: List[float], agent_embedding: List[float]) -> float:
        """Return the semantic similarity of two embeddings, mapped to [0, 1].

        cosine() returns a distance in [0, 2]; 1 - distance is a cosine
        similarity in [-1, 1], and (similarity + 1) / 2 rescales it to [0, 1].
        Empty inputs or computation errors score 0.0.
        """
        if not task_embedding or not agent_embedding:
            return 0.0

        try:
            similarity = 1 - cosine(task_embedding, agent_embedding)
            return (similarity + 1) / 2
        except Exception as e:
            logger.error(f"Failed to calculate similarity: {str(e)}")
            return 0.0

    async def compute_shapley_values(self, task: Dict[str, Any], candidate_agents: List[str]) -> Dict[str, float]:
        """Compute (simplified) Shapley-value contributions for candidates.

        Each agent's score blends semantic match, performance, load, and
        backend compatibility. This is an approximation, not a full Shapley
        computation over all coalitions — that would require Monte-Carlo
        sampling for realistic agent counts.
        """
        # Task embedding (served from cache when possible)
        task_embedding_info = await self.compute_task_embedding(task)

        # Backend the task wants, defaulting to the distributor's preference
        task_backend = task.get('model_backend', self.preferred_backend)

        # Initialize every candidate at zero contribution
        shapley_values = {agent_id: 0.0 for agent_id in candidate_agents}

        total_agents = len(candidate_agents)

        if total_agents == 0:
            return shapley_values

        # For each agent, estimate its marginal contribution
        for i, agent_id in enumerate(candidate_agents):
            try:
                agent = self.agent_profiles.get(agent_id)
                if not agent or not agent.embedding:
                    continue

                # Semantic match between task and the agent's capabilities
                semantic_score = self.calculate_similarity_score(
                    task_embedding_info.embedding, 
                    agent.embedding
                )

                # Performance: mostly success rate, plus responsiveness and
                # resource efficiency (response time clamped to avoid div-by-0)
                performance_score = 0.7 * agent.performance_metrics.get('success_rate', 0.5) + \
                                   0.2 * (1 / max(agent.performance_metrics.get('avg_response_time', 1.0), 0.1)) + \
                                   0.1 * agent.performance_metrics.get('resource_efficiency', 0.7)

                # Load: lower load scores higher (10 tasks treated as full load)
                load_factor = max(1.0 - (agent.current_load / 10.0), 0.1)

                # Backend compatibility: exact > preferred-fallback > default
                backend_compatibility = 0.5
                supported_backends = agent.supported_model_backends or [self.preferred_backend]
                if task_backend in supported_backends:
                    backend_compatibility = 1.0
                elif self.preferred_backend in supported_backends:
                    backend_compatibility = 0.7

                # Weighted composite score for this agent
                agent_score = 0.4 * semantic_score + 0.3 * performance_score + 0.2 * load_factor + 0.1 * backend_compatibility

                # Position-weighted accumulation kept from the original code:
                # agent i receives (i + 1) increments of agent_score / (i + 1),
                # i.e. the loop effectively adds agent_score once (before
                # normalization below).
                for k in range(1, total_agents + 1):
                    if k <= i + 1:
                        weight = 1.0 / (i + 1)
                        shapley_values[agent_id] += weight * agent_score
            except Exception as e:
                logger.error(f"Failed to compute Shapley value for agent {agent_id}: {str(e)}")

        # Normalize so contributions sum to 1 (when any are positive)
        total = sum(shapley_values.values())
        if total > 0:
            shapley_values = {k: v / total for k, v in shapley_values.items()}

        return shapley_values

    async def select_optimal_agents(self, task: Dict[str, Any], k: int = 1) -> List[Tuple[str, float]]:
        """Select the top-k agents for *task*, ranked by Shapley value."""
        # Backend the task wants, defaulting to the distributor's preference
        task_backend = task.get('model_backend', self.preferred_backend)

        # Candidates: agents that support the requested backend
        candidate_agents = []
        for agent_id, profile in self.agent_profiles.items():
            supported_backends = profile.supported_model_backends or [self.preferred_backend]
            if task_backend in supported_backends:
                candidate_agents.append(agent_id)

        # Fallback: if no agent supports the backend, consider everyone
        if not candidate_agents:
            logger.warning(f"No agents supporting backend {task_backend}, using all available agents")
            candidate_agents = list(self.agent_profiles.keys())

        if not candidate_agents:
            logger.warning("No candidate agents available")
            return []

        # Score candidates via the simplified Shapley computation
        shapley_values = await self.compute_shapley_values(task, candidate_agents)

        # Highest Shapley value first; keep the top k
        sorted_agents = sorted(
            shapley_values.items(), 
            key=lambda x: x[1], 
            reverse=True
        )

        return sorted_agents[:k]

    async def distribute_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """Distribute *task* to the optimal agent(s) and record the allocation.

        Returns a dict with status 'allocated' (plus the chosen agents,
        complexity score and backend) or status 'error' with a message.
        """
        try:
            # Task complexity determines how many agents collaborate
            task_embedding_info = await self.compute_task_embedding(task)

            # Backend the task wants, defaulting to the preference
            task_backend = task.get('model_backend', self.preferred_backend)

            # Complex tasks (score > 0.7) may be split across up to 3 agents
            num_agents = 1
            if task_embedding_info.complexity_score > 0.7:
                num_agents = min(3, len(self.agent_profiles))

            # Pick the best agent(s)
            optimal_agents = await self.select_optimal_agents(task, num_agents)

            if not optimal_agents:
                logger.error(f"No suitable agent found for task {task['task_id']}")
                return {
                    'task_id': task['task_id'],
                    'status': 'error',
                    'message': 'No suitable agent found'
                }

            # Bump the in-memory load of every chosen agent
            for agent_id, _ in optimal_agents:
                if agent_id in self.agent_profiles:
                    self.agent_profiles[agent_id].current_load += 1

            # Persist the allocation record for 24 hours
            allocation_key = f"task_allocation:{task['task_id']}"
            self.redis_client.setex(
                allocation_key,
                86400,
                ",".join([f"{agent_id}:{score:.4f}:{task_backend}" for agent_id, score in optimal_agents])
            )

            logger.info(f"Task {task['task_id']} allocated to agents: {[a[0] for a in optimal_agents]} using backend: {task_backend}")

            return {
                'task_id': task['task_id'],
                'allocated_agents': optimal_agents,
                'complexity_score': task_embedding_info.complexity_score,
                'model_backend': task_backend,
                'status': 'allocated'
            }
        except Exception as e:
            logger.error(f"Failed to distribute task {task['task_id']}: {str(e)}", exc_info=True)
            return {
                'task_id': task['task_id'],
                'status': 'error',
                'message': str(e)
            }

    async def update_agent_performance(self, agent_id: str, task_result: Dict[str, Any]):
        """Fold a finished task's result into the agent's stored metrics."""
        try:
            # Release one unit of the agent's in-memory load
            if agent_id in self.agent_profiles:
                self.agent_profiles[agent_id].current_load = max(0, 
                    self.agent_profiles[agent_id].current_load - 1)

            performance_key = f"agent_performance:{agent_id}"

            # Start from the currently stored metrics
            metrics = await self._get_agent_performance(agent_id)

            # Success rate: exponentially weighted moving average
            success = task_result.get('status') == 'success'
            metrics['success_rate'] = 0.8 * metrics['success_rate'] + 0.2 * (1.0 if success else 0.0)

            # Response time: same EWMA smoothing
            response_time = task_result.get('response_time', 1.0)
            metrics['avg_response_time'] = 0.8 * metrics['avg_response_time'] + 0.2 * response_time

            # Throughput: simple completed-task counter
            metrics['throughput'] += 1

            # Write the updated metrics back to Redis
            for k, v in metrics.items():
                self.redis_client.hset(performance_key, k, v)

            # Metrics expire after 7 days of inactivity
            self.redis_client.expire(performance_key, 7 * 86400)

        except Exception as e:
            logger.error(f"Failed to update agent performance for {agent_id}: {str(e)}")

    async def run_distribution_loop(self):
        """Continuously pull queued tasks and distribute them."""
        # Imported lazily to break a circular import
        from auxiliary_agents.task_scheduler_agent import task_scheduler_agent

        while True:
            try:
                # Pull the next pending task, if any
                task = task_scheduler_agent.dequeue_task()
                if task:
                    # Smart-allocate the task
                    allocation_result = await self.distribute_task(task)

                    # Mark the task as running once it has been allocated
                    if allocation_result['status'] == 'allocated':
                        task_scheduler_agent.update_task_status(
                            task['task_id'], 
                            TASK_STATUSES.RUNNING,
                            f"Allocated to {len(allocation_result['allocated_agents'])} agents"
                        )

                # Poll the queue every 5 seconds
                await asyncio.sleep(5)

                # Reload agent profiles at most once per 60 seconds
                current_time = asyncio.get_event_loop().time()
                if hasattr(self, '_last_profile_update'):
                    if current_time - self._last_profile_update > 60:
                        await self._load_agent_profiles()
                        self._last_profile_update = current_time
                else:
                    self._last_profile_update = current_time

            except Exception as e:
                logger.error(f"Error in distribution loop: {str(e)}", exc_info=True)
                await asyncio.sleep(1)


# Singleton factory - supports multiple model backends
def get_smart_distributor(preferred_backend: Optional[str] = None) -> SmartTaskDistributor:
    """Build a SmartTaskDistributor, optionally pinned to a model backend.

    A factory is used instead of sharing one module-level instance so that
    callers can obtain distributors configured for different backends.
    """
    return SmartTaskDistributor(preferred_backend)

# Default singleton instance (uses the backend from the global config).
smart_task_distributor = get_smart_distributor()

# Test code
async def test_smart_distributor():
    """Exercise the smart task distributor against a few sample tasks."""
    log = Logger.update_context(task_id="test_distributor", agent_name="smart_task_distributor")
    log.info("Starting test for smart task distributor")

    # Prepare the distributor (model clients, agent profiles, embeddings).
    await smart_task_distributor.initialize()

    # Sample tasks covering explicitly pinned backends and the default one.
    sample_tasks = [
        {
            "task_id": "test_task_001",
            "task_type": "text_qa",
            "query": "解释量子计算的基本原理和应用场景",
            "metadata": {"priority": "high"},
            "model_backend": "vllm",  # pin the vllm backend
        },
        {
            "task_id": "test_task_002",
            "task_type": "tool_call",
            "query": "计算100到200之间所有质数的和",
            "metadata": {"priority": "medium"},
            "model_backend": "ollama",  # pin the ollama backend
        },
        {
            "task_id": "test_task_003",
            "task_type": "multi_modal",
            "query": "分析这张图片并描述其中的主要元素",
            "metadata": {"priority": "high"},  # no backend -> use the default
        },
    ]

    # Distribute each task and log the allocation outcome.
    for sample in sample_tasks:
        outcome = await smart_task_distributor.distribute_task(sample)
        log.info(f"Task distribution result: {outcome}")

    log.success("Smart task distributor test completed")


# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    asyncio.run(test_smart_distributor())
