import pickle
import random
import time
from collections import deque
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

from .memory import ReplayMemory

class DQN(nn.Module):
    """Deep Q-network: a small 3-layer MLP mapping a state vector to Q-values.

    The layer attributes are named ``fc1``/``fc2``/``fc3`` so that
    ``state_dict`` keys stay stable for saving and loading.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        hidden = 64
        self.fc1 = nn.Linear(input_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, output_size)

    def forward(self, x):
        """Return one Q-value per action for the given state batch."""
        hidden = self.fc2(torch.relu(self.fc1(x)))
        return self.fc3(torch.relu(hidden))

class DecisionLayer:
    """Base class for a chain-of-responsibility decision layer."""

    def __init__(self):
        self.next_layer = None

    def set_next(self, layer: 'DecisionLayer'):
        """Attach the layer that decisions are forwarded to."""
        self.next_layer = layer

    def decide(self, state: 'State', context: Dict[str, Any]) -> Tuple[str, Dict[str, Any]]:
        """Forward the decision down the chain.

        The last layer in the chain falls back to the default behavior:
        ``("explore", {})``.
        """
        if self.next_layer is None:
            return "explore", {}  # default behavior at the end of the chain
        return self.next_layer.decide(state, context)

class State(Enum):
    """Enumeration of agent states in the behavior state machine."""
    EXPLORE = 1
    PURSUE = 2
    RETREAT = 3
    COMMUNICATE = 4

class Worker(DecisionLayer):
    """Worker-layer agent implementing concrete behavior policies.

    Supports two learning backends selected by ``use_dqn``:
    a tabular Q-learning policy (default) or a deep Q-network with a
    target network and experience replay.
    """

    # Canonical action ordering shared by the DQN output head and the
    # experience encoding, so action indices always line up.
    ACTIONS = ["explore", "pursue", "retreat", "communicate"]

    def __init__(self, name: str = "DefaultAgent", learning_rate: float = 0.1, 
                 discount_factor: float = 0.95, exploration_rate: float = 0.05,
                 use_dqn: bool = False, state_size: int = 4, action_size: int = 4):
        """Initialize the agent.

        Args:
            name: Agent name.
            learning_rate: Q-learning rate (alpha) / DQN optimizer lr.
            discount_factor: Discount factor (gamma).
            exploration_rate: Epsilon for epsilon-greedy action selection.
            use_dqn: Whether to use a deep Q-network instead of a Q-table.
            state_size: State-vector size (DQN only).
            action_size: Action-space size (DQN only).
        """
        super().__init__()  # FIX: base class was never initialized (next_layer)
        # Training statistics
        self.episode_count = 0  # NOTE(review): never incremented here — presumably maintained by the caller
        self.training_log: List[Dict[str, Any]] = []
        self.eval_results: List[Dict[str, Any]] = []
        self.name = name
        self.state = State.EXPLORE
        self.use_dqn = use_dqn
        # FIX: decide() reads self.action_size but it was never stored
        self.action_size = action_size

        if not self.use_dqn:
            # Tabular Q-learning values, mildly biased toward "pursue"
            self.q_table: Dict[State, Dict[str, float]] = {
                state: {"explore": 0, "pursue": 1.0, "retreat": 0, "communicate": 0.5}
                for state in State
            }
        else:
            # DQN components: online net, frozen target net, replay buffer
            self.dqn = DQN(state_size, action_size)
            self.target_dqn = DQN(state_size, action_size)
            self.target_dqn.load_state_dict(self.dqn.state_dict())
            self.optimizer = optim.Adam(self.dqn.parameters(), lr=learning_rate)
            self.memory = ReplayMemory(capacity=10000)
            self.batch_size = 64
            self.update_target_every = 100  # target-net sync period (train steps)
            self.train_step = 0

        self.learning_params = {
            "alpha": learning_rate,
            "gamma": discount_factor,
            "epsilon": exploration_rate
        }
        self.communication_buffer: List[Dict[str, Any]] = []

    def update_q_table(self, state: State, action: str, reward: float, next_state: State) -> Dict[str, Any]:
        """Apply one tabular Q-learning update and return gradient info.

        Only valid when ``use_dqn`` is False (the Q-table does not exist
        in DQN mode).

        Args:
            state: Current state.
            action: Action taken.
            reward: Reward received.
            next_state: State transitioned to.

        Returns:
            Dict with gradient information (td-error based), suitable for
            pushing to a parameter server in distributed mode.
        """
        current_q = self.q_table[state][action]
        max_next_q = max(self.q_table[next_state].values())
        # Temporal-difference error: r + gamma * max_a' Q(s', a') - Q(s, a)
        td_error = reward + self.learning_params["gamma"] * max_next_q - current_q

        # Gradient payload for distributed aggregation
        grads = {
            "q_table": {
                str(state): {action: td_error},
                str(next_state): {"max_next": self.learning_params["gamma"] * td_error}
            },
            "learning_params": {
                "alpha": td_error,
                "gamma": reward * max_next_q
            }
        }

        # Local update
        new_q = current_q + self.learning_params["alpha"] * td_error
        self.q_table[state][action] = new_q

        return grads

    def decide(self, observation: Dict[str, Any], other_agents: Optional[List['Agent']] = None, 
              eval_mode: bool = False) -> str:
        """Choose an action via the state machine plus the RL policy.

        Args:
            observation: Environment observation data.
            other_agents: Other agents (used for communication).
            eval_mode: If True, act greedily (exploration disabled).

        Returns:
            String name of the chosen action.
        """
        # State assessment inputs
        danger_level = observation.get("danger_level", 0)
        target_value = observation.get("target_value", 0)
        energy_level = observation.get("energy", 1.0)

        # Urgent buffered messages short-circuit normal decision making
        urgent_messages = [m for m in self.communication_buffer if m["priority"] >= 4]
        if urgent_messages:
            self.state = State.RETREAT if "danger" in urgent_messages[0]["data"] else State.PURSUE
            return self._process_urgent_message(urgent_messages[0])

        # Multi-factor state transition
        if danger_level > 0.7:
            self.state = State.RETREAT
        elif target_value > 0.8 and energy_level > 0.3:
            self.state = State.PURSUE
        elif (len(self.communication_buffer) > 0 and 
              energy_level > 0.5 and 
              not any(m["type"] == "request" for m in self.communication_buffer)):
            self.state = State.COMMUNICATE
        else:
            # Adaptive exploration: explore more when energy is low
            explore_prob = 0.3 + (0.5 * (1 - energy_level))
            if np.random.random() < explore_prob:
                self.state = State.EXPLORE
            else:
                # Pick the state with the highest Q-value
                if not self.use_dqn:
                    best_state = max(State, key=lambda s: max(self.q_table[s].values()))
                else:
                    state_vec = self._observation_to_vector(observation)
                    with torch.no_grad():
                        q_values = self.dqn(torch.FloatTensor(state_vec).unsqueeze(0))
                        # FIX: State(argmax) raised ValueError — enum values
                        # start at 1 while argmax indices start at 0.
                        best_state = list(State)[q_values.argmax().item() % len(State)]
                self.state = best_state

        # Action selection (epsilon-greedy unless eval_mode)
        if not self.use_dqn:
            # Q-learning policy
            if not eval_mode and np.random.random() < self.learning_params["epsilon"]:
                action = np.random.choice(list(self.q_table[self.state].keys()))
            else:
                action = max(self.q_table[self.state].items(), key=lambda x: x[1])[0]
        else:
            # DQN policy
            state_vec = self._observation_to_vector(observation)
            state_tensor = torch.FloatTensor(state_vec).unsqueeze(0)

            if not eval_mode and np.random.random() < self.learning_params["epsilon"]:
                action_idx = np.random.randint(0, self.action_size)
            else:
                with torch.no_grad():
                    q_values = self.dqn(state_tensor)
                    action_idx = torch.argmax(q_values).item()

            # Map network index to the action string
            action = self.ACTIONS[action_idx % len(self.ACTIONS)]

        # Communication: broadcast the current observation to peers.
        # FIX: the original message lacked the type/priority/timestamp fields
        # that receive_message() validates, so every broadcast was rejected.
        if action == "communicate" and other_agents:
            message = {
                "from": self.name,
                "type": "info",
                "data": observation,
                "priority": 2,
                "timestamp": time.time(),
            }
            for agent in other_agents:
                agent.receive_message(message)
            return "communicate"

        return action

    def _process_urgent_message(self, message: Dict[str, Any]) -> str:
        """Consume an urgent (priority >= 4) message and return the reaction.

        FIX: this helper was called from decide() but never defined.
        decide() has already set self.state based on the message content,
        so the returned action simply mirrors that state.
        """
        if message in self.communication_buffer:
            self.communication_buffer.remove(message)  # message has been handled
        return "retreat" if self.state == State.RETREAT else "pursue"

    def _get_shared_info(self) -> Dict[str, Any]:
        """Build the payload shared in response to a share_info request.

        FIX: this helper was called from _handle_request() but never defined.
        """
        return {
            "name": self.name,
            "state": self.state.name,
            "buffered_messages": len(self.communication_buffer),
        }

    def evaluate(self, env, n_episodes: int = 10) -> Dict[str, float]:
        """Evaluate policy performance greedily (no exploration).

        Args:
            env: Evaluation environment with gym-style reset()/step().
            n_episodes: Number of episodes to evaluate.

        Returns:
            Dict of evaluation metrics; also appended to self.eval_results.
        """
        episode_rewards = []
        episode_steps = []

        for _ in range(n_episodes):
            obs = env.reset()
            done = False
            total_reward = 0
            step_count = 0

            while not done:
                action = self.decide(obs, eval_mode=True)
                next_obs, reward, done, _ = env.step(action)
                total_reward += reward
                step_count += 1
                obs = next_obs

            episode_rewards.append(total_reward)
            episode_steps.append(step_count)

        # Record evaluation results
        eval_result = {
            'episode': self.episode_count,
            'avg_reward': float(np.mean(episode_rewards)),
            'max_reward': float(np.max(episode_rewards)),
            'min_reward': float(np.min(episode_rewards)),
            'avg_steps': float(np.mean(episode_steps)),
            'timestamp': time.time()
        }
        self.eval_results.append(eval_result)

        return eval_result

    def _observation_to_vector(self, observation: Dict[str, Any]) -> List[float]:
        """Convert an observation dict to a fixed-size state vector.

        NOTE(review): these keys ("danger"/"target") differ from the
        "danger_level"/"target_value" keys read by decide() — confirm the
        observation schema actually carries both sets of keys.
        """
        return [
            float(observation.get("danger", False)),
            float(observation.get("target", False)),
            float(observation.get("distance", 0.0)),
            float(observation.get("energy", 1.0))
        ]

    def receive_message(self, message: Dict[str, Any]):
        """Receive a message from another agent.

        Args:
            message: Received message of the form:
                {
                    "from": sender name,
                    "type": message type ("alert"/"info"/"request"),
                    "data": message payload,
                    "priority": priority (1-5),
                    "timestamp": send time
                }
        """
        # Validate message format
        required_fields = {"from", "type", "data", "priority", "timestamp"}
        if not all(field in message for field in required_fields):
            print(f"[{self.name}] 收到无效消息格式: {message}")
            return

        # Dispatch by priority
        if message["priority"] >= 4:  # high priority: handle immediately
            if message["type"] == "alert":
                self.state = State.RETREAT
                print(f"[{self.name}] 处理高优先级警报: {message['data']}")
            elif message["type"] == "request":
                self._handle_request(message)
        else:
            # Normal messages are buffered for later processing
            self.communication_buffer.append(message)
            print(f"[{self.name}] 缓存消息: {message['type']} from {message['from']}")

    def _handle_request(self, message: Dict[str, Any]):
        """Handle a request-type message (assist / share_info)."""
        request = message["data"]
        if request.get("action") == "assist":
            print(f"[{self.name}] 响应协助请求 from {message['from']}")
            self.state = State.PURSUE
        elif request.get("action") == "share_info":
            print(f"[{self.name}] 响应信息共享请求 from {message['from']}")
            self.communication_buffer.append({
                "from": self.name,
                "type": "info",
                "data": self._get_shared_info(),
                "priority": 3,
                "timestamp": time.time()
            })

    def store_experience(self, state: Dict[str, Any], action: str, reward: float, 
                        next_state: Dict[str, Any], done: bool):
        """Store a transition in the replay buffer (DQN mode only).

        Args:
            state: Current observation dict.
            action: Action taken (one of ACTIONS).
            reward: Reward received.
            next_state: Observation dict after the transition.
            done: Whether the episode terminated.
        """
        if not self.use_dqn:
            return

        state_vec = self._observation_to_vector(state)
        action_idx = self.ACTIONS.index(action)
        next_state_vec = self._observation_to_vector(next_state)

        self.memory.push(state_vec, action_idx, reward, next_state_vec, done)

    def train_dqn(self) -> Optional[float]:
        """Run one DQN training step from a replay-buffer minibatch.

        Returns:
            The training loss if a step was taken, otherwise None
            (DQN disabled or not enough stored experience yet).
        """
        if not self.use_dqn or len(self.memory) < self.batch_size:
            return None

        # Sample a minibatch from the replay buffer
        batch = self.memory.sample(self.batch_size)

        states = torch.FloatTensor(np.array(batch['states']))
        actions = torch.LongTensor(batch['actions'])
        rewards = torch.FloatTensor(batch['rewards'])
        next_states = torch.FloatTensor(np.array(batch['next_states']))
        dones = torch.FloatTensor(batch['dones'])

        # Current Q-values and bootstrapped targets (target net, terminal-masked)
        current_q = self.dqn(states).gather(1, actions.unsqueeze(1))
        next_q = self.target_dqn(next_states).max(1)[0].detach()
        target_q = rewards + (1 - dones) * self.learning_params["gamma"] * next_q

        # Compute loss and update
        loss = nn.MSELoss()(current_q.squeeze(), target_q)
        self.optimizer.zero_grad()
        loss.backward()

        # Gradient clipping to prevent explosion
        torch.nn.utils.clip_grad_norm_(self.dqn.parameters(), 1.0)
        self.optimizer.step()

        # Record training metrics
        self._log_training(loss.item())

        # Periodically sync the target network
        self.train_step += 1
        if self.train_step % self.update_target_every == 0:
            self.target_dqn.load_state_dict(self.dqn.state_dict())

        return loss.item()

    def _get_current_observation(self) -> Dict[str, Any]:
        """Return the current environment observation.

        Placeholder: returns neutral defaults; a real deployment would
        query the environment here.
        """
        return {
            "danger_level": 0.0,  # default; fetched from the environment in practice
            "target_value": 0.0,  # default
            "energy": 1.0,        # default energy level
            "team_reward": 0.0    # default team reward
        }

    def _log_training(self, loss: float):
        """Append one training record and periodically print a summary."""
        self.training_log.append({
            'episode': self.episode_count,
            'step': self.train_step,
            'loss': loss,
            'epsilon': self.learning_params["epsilon"],
            'timestamp': time.time()
        })

        # Print a rolling summary every 10 records
        if len(self.training_log) % 10 == 0:
            avg_loss = np.mean([x['loss'] for x in self.training_log[-10:]])
            print(f"[{self.name}] Episode {self.episode_count} - "
                  f"Avg Loss: {avg_loss:.4f}, Epsilon: {self.learning_params['epsilon']:.3f}")

    def save_model(self, path: str):
        """Save model parameters to a file.

        Args:
            path: Destination path (torch state_dict in DQN mode,
                pickled Q-table otherwise).
        """
        if self.use_dqn:
            torch.save(self.dqn.state_dict(), path)
        else:
            with open(path, 'wb') as f:
                pickle.dump(self.q_table, f)

    def load_model(self, path: str):
        """Load model parameters from a file.

        Args:
            path: Source path. NOTE: unpickling is unsafe on untrusted
                files — only load models you produced yourself.
        """
        if self.use_dqn:
            self.dqn.load_state_dict(torch.load(path))
            self.target_dqn.load_state_dict(self.dqn.state_dict())
        else:
            with open(path, 'rb') as f:
                self.q_table = pickle.load(f)

    def calculate_reward(self, observation: Dict[str, Any], action: str) -> float:
        """Compute the reward for taking ``action`` under ``observation``.

        Args:
            observation: Environment observation data.
            action: Action taken.

        Returns:
            Reward value clamped to [-1.0, 2.0].
        """
        reward = 0.0
        energy_cost = {
            "explore": 0.1,
            "pursue": 0.3,
            "retreat": 0.2,
            "communicate": 0.25
        }.get(action, 0.0)

        # Target-related reward
        target_value = observation.get("target_value", 0.0)
        reward += 2.0 * target_value  # target-value multiplier

        # Danger penalty
        danger_level = observation.get("danger_level", 0.0)
        reward -= 1.5 * danger_level  # danger penalty coefficient

        # Communication reward
        if action == "communicate" and len(self.communication_buffer) > 0:
            last_msg = self.communication_buffer[-1]
            if last_msg["type"] == "info":
                reward += 0.3 * last_msg["priority"]  # information-value bonus

        # Energy-efficiency reward
        energy = observation.get("energy", 1.0)
        reward += 0.2 * (energy - energy_cost)  # energy efficiency

        # Team-cooperation reward
        if "team_reward" in observation:
            reward += 0.5 * observation["team_reward"]

        # Clamp the reward range
        return max(-1.0, min(2.0, reward))

    def execute(self, action: str, next_state: Optional[Dict[str, Any]] = None) -> float:
        """Execute the chosen action and return its reward.

        Args:
            action: Action to execute.
            next_state: Observation after the action (used for DQN training).

        Returns:
            Reward obtained for the action.
        """
        print(f"[{self.name}] 执行动作: {action}")
        observation = self._get_current_observation()
        reward = self.calculate_reward(observation, action)

        # DQN mode: store the transition and take one training step.
        # FIX: the original passed self.state (a State enum) where
        # store_experience expects an observation dict, which would crash
        # in _observation_to_vector.
        if self.use_dqn and next_state is not None:
            self.store_experience(observation, action, reward, next_state, False)
            self.train_dqn()

        return reward

class Manager(DecisionLayer):
    """Manager-layer agent responsible for macro-level decisions."""

    def __init__(self, workers: List[Worker]):
        super().__init__()
        self.workers = workers
        self.task_queue = []

    def assign_task(self, task: Dict[str, Any]):
        """Queue a new task for later delegation."""
        self.task_queue.append(task)

    def decide(self, state: State, context: Dict[str, Any]) -> Tuple[str, Dict[str, Any]]:
        """Macro decision logic: delegate queued work while pursuing."""
        if state != State.PURSUE or not self.task_queue:
            # Nothing to delegate — pass the decision down the chain
            return super().decide(state, context)
        next_task = self.task_queue.pop(0)
        return "delegate", {"task": next_task}

# Example usage
if __name__ == "__main__":
    # --- Local mode demo ---
    print("=== 本地模式 ===")
    pair = [Worker("LocalWorker1"), Worker("LocalWorker2")]
    boss = Manager(pair)
    for member in pair:
        member.set_next(boss)

    observation = {"danger": False, "target": True}
    chosen = pair[0].decide(observation, [pair[1]])
    gained = pair[0].execute(chosen)
    gradients = pair[0].update_q_table(pair[0].state, chosen, gained, State.PURSUE)
    print(f"本地梯度: {gradients}")

    # --- Distributed mode demo (requires Ray) ---
    print("\n=== 分布式模式 ===")
    try:
        import ray
        from .distributed import create_distributed_system

        ray.init()
        system = create_distributed_system(2)

        # Remote Worker handles
        remote_a, remote_b = system["workers"]

        # Sync initial parameters
        ray.get(remote_a.sync_params.remote())
        ray.get(remote_b.sync_params.remote())

        # Distributed decision
        shared_obs = ray.put({"danger": False, "target": True})
        remote_action = ray.get(remote_a.decide.remote(shared_obs, [remote_b]))
        print(f"分布式决策结果: {remote_action}")

        # Distributed learning
        remote_grads = ray.get(remote_a.update_q_table.remote(
            State.PURSUE, remote_action, 1.0, State.PURSUE
        ))
        print(f"分布式梯度: {remote_grads}")

        # Push gradients to the parameter server
        ray.get(remote_a.push_gradients.remote(remote_grads))

    except ImportError:
        print("未安装Ray，跳过分布式示例")