import torch
import numpy as np
from torch.optim import Adam
from torch.distributions import Normal  # Gaussian distribution for continuous-action policies
from networks import ActorNetwork, CentralizedCritic

# Compute device shared by all networks: prefer CUDA when available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class RolloutBuffer:
    """Per-agent on-policy experience buffer.

    Holds parallel lists (one entry per stored transition) for states,
    actions, log-probabilities, rewards, next states, and done flags.
    All lists are emptied together via clear() after each policy update.
    """

    # Names of the parallel transition lists; keeps __init__/clear in sync.
    _FIELDS = ("states", "actions", "logprobs", "rewards", "next_states", "dones")

    def __init__(self):
        # Create one empty list per transition field.
        for field in self._FIELDS:
            setattr(self, field, [])

    def clear(self):
        # Empty every field in place (preserves list identity for any aliases).
        for field in self._FIELDS:
            getattr(self, field).clear()

class MAPPO:
    """Multi-Agent PPO: decentralized Gaussian actors, one centralized critic.

    Each agent owns an ActorNetwork and a RolloutBuffer; a single
    CentralizedCritic maps the global state to one value per agent.
    Exploration noise follows a multiplicatively decayed std schedule.
    """

    def __init__(self, num_agents, actor_dims, global_state_dim, action_dims, action_bounds,
                 lr_actor=3e-4, lr_critic=1e-3, gamma=0.99, K_epochs=10, eps_clip=0.2,
                 action_std_init=0.6, action_std_decay_rate=0.05, min_action_std=0.1):
        """
        Args:
            num_agents: number of agents.
            actor_dims: per-agent observation dimensions (len == num_agents).
            global_state_dim: dimension of the global state fed to the critic.
            action_dims: per-agent action dimensions.
            action_bounds: per-agent (low, high) pairs for action scaling.
            lr_actor, lr_critic: Adam learning rates for actors / critic.
            gamma: discount factor for returns.
            K_epochs: PPO optimization epochs per update.
            eps_clip: PPO clipping range epsilon.
            action_std_init, action_std_decay_rate, min_action_std:
                exploration-noise schedule (multiplicative decay with a floor).
        """
        self.num_agents = num_agents

        # One decentralized actor (and optimizer) per agent.
        self.actors = [
            ActorNetwork(actor_dims[i], action_dims[i]).to(device) for i in range(num_agents)
        ]
        self.actor_optimizers = [Adam(actor.parameters(), lr=lr_actor) for actor in self.actors]

        # Configure each actor's output scaling from the environment bounds.
        for i, actor in enumerate(self.actors):
            low, high = action_bounds[i]
            actor.set_action_scale(low, high)

        # Centralized critic over the global state, one value head per agent.
        self.critic = CentralizedCritic(global_state_dim, num_agents).to(device)
        self.critic_optimizer = Adam(self.critic.parameters(), lr=lr_critic)

        # PPO hyper-parameters and per-agent rollout storage.
        self.gamma = gamma
        self.K_epochs = K_epochs
        self.eps_clip = eps_clip
        self.buffers = [RolloutBuffer() for _ in range(num_agents)]

        # Exploration-noise schedule state.
        self.action_std_init = action_std_init
        self.action_std_decay_rate = action_std_decay_rate
        self.min_action_std = min_action_std
        self.current_action_std = action_std_init

    def select_action(self, agent_id, observation):
        """Sample an action for one agent from its Gaussian policy.

        Returns:
            (action, logprob) as numpy arrays; logprob is per action
            dimension and is reduced to a joint log-probability in update().
        """
        state = torch.FloatTensor(observation).to(device)  # to tensor on device
        with torch.no_grad():
            action_mean = self.actors[agent_id](state)
            # BUGFIX: use the scheduled exploration std rather than a
            # hard-coded 0.1, so decay_action_std() actually has an effect.
            std = torch.ones_like(action_mean) * self.current_action_std
            dist = Normal(action_mean, std)
            action = dist.sample()
            logprob = dist.log_prob(action)
        return action.cpu().numpy(), logprob.cpu().numpy()

    def decay_action_std(self):
        """Multiplicatively decay the exploration std, floored at min_action_std."""
        self.current_action_std = max(
            self.current_action_std * (1 - self.action_std_decay_rate),
            self.min_action_std
        )

    def store_transition(self, agent_id, state, action, logprob, reward, next_state, done):
        """Append one transition to the given agent's rollout buffer."""
        buf = self.buffers[agent_id]
        buf.states.append(state)
        buf.actions.append(action)
        buf.logprobs.append(logprob)
        buf.rewards.append(reward)
        buf.next_states.append(next_state)
        buf.dones.append(done)

    def update(self, global_state):
        """Run one centralized MAPPO update over the collected rollouts.

        Computes per-agent discounted returns (resetting at episode ends),
        advantages against the centralized critic's values, then K_epochs of
        clipped-PPO actor updates per agent, followed by one critic
        regression step. Clears all buffers and decays the exploration std.

        Args:
            global_state: sequence of global states, one per stored
                transition; converted to shape (batch_size, global_state_dim).

        Returns:
            (mean_actor_loss, critic_loss) as Python floats; actor loss is
            summed over epochs then averaged over agents.

        Raises:
            ValueError: if the critic output cannot be matched to the
                (batch_size, num_agents) returns tensor.
        """
        # Global states as a (batch_size, global_state_dim) tensor.
        global_state = torch.FloatTensor(np.array(global_state)).to(device)

        # Centralized value estimates; target shape (batch_size, num_agents).
        V = self.critic(global_state)

        # Per-agent discounted returns, accumulated backwards through time.
        returns = []
        for agent_id in range(self.num_agents):
            buf = self.buffers[agent_id]
            R = 0.0
            discounted_rewards = []
            # BUGFIX: reset the running return at episode boundaries so a
            # buffer holding several episodes does not leak reward across
            # them (identical result when the buffer is a single episode).
            for r, done in zip(reversed(buf.rewards), reversed(buf.dones)):
                if done:
                    R = 0.0
                R = r + self.gamma * R
                discounted_rewards.insert(0, R)
            returns.append(torch.FloatTensor(discounted_rewards).to(device))

        # Stack to (batch_size, num_agents).
        returns = torch.stack(returns, dim=1)

        # Broadcast a 1-D critic output up to (batch_size, num_agents).
        if V.dim() == 1:
            if len(V) == self.num_agents:  # V is (num_agents,)
                V = V.unsqueeze(0).expand(returns.size(0), -1)
            else:  # V is (batch_size,)
                V = V.unsqueeze(1).expand(-1, self.num_agents)

        # Explicit check (an assert would be stripped under `python -O`).
        if returns.shape != V.shape:
            raise ValueError(
                f"Shape mismatch: returns {returns.shape} vs V {V.shape}. "
                f"Global state input shape was {global_state.shape}"
            )

        # Advantages; detach so actor gradients do not flow into the critic.
        advantages = returns - V.detach()

        total_actor_loss = 0.0

        for agent_id in range(self.num_agents):
            buf = self.buffers[agent_id]
            # Current agent's rollout as tensors.
            states = torch.FloatTensor(np.array(buf.states)).to(device)
            actions = torch.FloatTensor(np.array(buf.actions)).to(device)
            old_logprobs = torch.FloatTensor(np.array(buf.logprobs)).to(device)

            # Multiple epochs of clipped-PPO optimization on the same batch.
            for _ in range(self.K_epochs):
                action_means = self.actors[agent_id](states)
                # BUGFIX: evaluate with the same scheduled std used at
                # sampling time (decay runs only at the end of update()),
                # not a hard-coded 0.1.
                dist = Normal(action_means, torch.ones_like(action_means) * self.current_action_std)
                logprobs = dist.log_prob(actions)
                entropy = dist.entropy().mean()

                # BUGFIX: reduce per-dimension log-probs to a joint
                # log-probability so ratios is (batch,) and broadcasts
                # correctly against the (batch,) advantage column.
                lp_new = logprobs.sum(dim=-1) if logprobs.dim() > 1 else logprobs
                lp_old = old_logprobs.sum(dim=-1) if old_logprobs.dim() > 1 else old_logprobs

                # Importance-sampling ratio between new and behavior policy.
                ratios = torch.exp(lp_new - lp_old.detach())

                # Clipped surrogate objective with an entropy bonus.
                surr1 = ratios * advantages[:, agent_id]
                surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages[:, agent_id]
                actor_loss = -torch.min(surr1, surr2).mean() - 0.01 * entropy

                # Actor gradient step.
                self.actor_optimizers[agent_id].zero_grad()
                actor_loss.backward()
                self.actor_optimizers[agent_id].step()
                total_actor_loss += actor_loss.item()

            # Rollout consumed; clear this agent's buffer.
            buf.clear()

        # One critic regression step toward the discounted returns.
        critic_loss = torch.nn.MSELoss()(V, returns)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        total_critic_loss = critic_loss.item()

        # Anneal exploration noise after each update.
        self.decay_action_std()
        return total_actor_loss / self.num_agents, total_critic_loss