import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical

# Device selection
# Run on CPU by default to avoid CUDA memory issues
device = torch.device('cpu')

class ActorCritic(nn.Module):
    """Shared-trunk actor-critic network with two independent categorical
    action heads and a scalar critic head."""

    def __init__(self, input_dim, action_dim1, action_dim2):
        super(ActorCritic, self).__init__()
        # Shared fully connected trunk
        self.fc1 = nn.Linear(input_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        # One actor head per action branch
        self.actor1 = nn.Linear(256, action_dim1)
        self.actor2 = nn.Linear(256, action_dim2)
        # Critic head producing a scalar state value
        self.critic = nn.Linear(256, 1)

    def forward(self, x):
        """Return (action_probs1, action_probs2, state_value) for states x."""
        h = torch.relu(self.fc1(x))
        h = torch.relu(self.fc2(h))
        # Per-branch action probabilities
        action_probs1 = torch.softmax(self.actor1(h), dim=-1)
        action_probs2 = torch.softmax(self.actor2(h), dim=-1)
        # State-value estimate, shape (N, 1)
        state_value = self.critic(h)
        return action_probs1, action_probs2, state_value

    def evaluate(self, state, action):
        """Return log-probs of the taken actions and the state values.

        `action` holds branch-1 actions in column 0 and branch-2 actions in
        column 1. Reuses forward() so the trunk is computed once and the two
        code paths cannot drift apart.
        """
        action_probs1, action_probs2, state_value = self.forward(state)
        dist1 = Categorical(action_probs1)
        dist2 = Categorical(action_probs2)
        action_logprobs1 = dist1.log_prob(action[:, 0])
        action_logprobs2 = dist2.log_prob(action[:, 1])
        return action_logprobs1, action_logprobs2, state_value

class PPOAgent:
    """PPO agent over a two-branch discrete action space, trained with GAE
    and the clipped surrogate objective.

    `policy_old` is a frozen copy of `policy` used to generate rollouts;
    `update()` optimizes `policy` against the buffered transitions and then
    syncs `policy_old`.
    """

    def __init__(self, env, agent_id, lr=5e-5, gamma=0.99, eps_clip=0.2, k_epochs=15, buffer_size=2048, gae_lambda=0.95,
                 actor_weight=1.0, critic_weight=0.3, entropy_weight=0.03):
        """
        PPOAgent initialization.

        Parameter notes:
        - entropy_weight: entropy-regularization weight, controls exploration.
          Suggested ranges:
          * 0.01 - 0.05: standard setting, balances exploration and exploitation
          * 0.1 - 0.3: more exploration, for complex environments or early training
          * 0.001 - 0.005: less exploration, for late training or simple environments
        """
        self.env = env.unwrapped
        self.agent_id = agent_id
        self.gamma = gamma
        self.eps_clip = eps_clip
        self.k_epochs = k_epochs
        self.buffer_size = buffer_size
        self.gae_lambda = gae_lambda  # GAE lambda: trades bias against variance

        # Loss-term weights
        self.actor_weight = actor_weight      # weight of the actor surrogate loss
        self.critic_weight = critic_weight    # weight of the critic value loss
        self.entropy_weight = entropy_weight  # weight of the entropy bonus

        # Network dimensions are supplied by the environment
        input_dim, action_dim1, action_dim2 = self.env.get_agent_dims(agent_id)

        self.policy = ActorCritic(input_dim, action_dim1, action_dim2).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
        self.policy_old = ActorCritic(input_dim, action_dim1, action_dim2).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.MseLoss = nn.MSELoss()

        self.memory = Memory()

    def select_action(self, state):
        """Sample one action per branch from the frozen rollout policy.

        Returns ((action1, action2), (logprob1, logprob2)).
        NOTE(review): `state` is assumed to already be a tensor on `device` —
        confirm against the caller.
        """
        with torch.no_grad():
            action_probs1, action_probs2, _ = self.policy_old(state)
        dist1 = Categorical(action_probs1)
        dist2 = Categorical(action_probs2)
        action1 = dist1.sample()
        action2 = dist2.sample()
        return (action1, action2), (dist1.log_prob(action1), dist2.log_prob(action2))

    def calculate_gae(self, rewards, values, masks):
        """Generalized Advantage Estimation.

        `values` must carry one extra trailing entry (the bootstrap value of
        the final state). Returns (normalized advantages, raw returns).
        """
        if len(rewards) == 0:
            return torch.tensor([], device=device), torch.tensor([], device=device)

        advantages = torch.zeros_like(rewards).to(device)
        last_advantage = 0

        # Backward recursion over the trajectory
        for t in reversed(range(len(rewards))):
            # TD error: delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
            delta = rewards[t] + self.gamma * values[t+1] * masks[t] - values[t]
            # GAE: A_t = delta_t + gamma * lambda * mask_t * A_{t+1}
            advantages[t] = delta + self.gamma * self.gae_lambda * masks[t] * last_advantage
            last_advantage = advantages[t]

        # Returns = advantages + value baseline
        returns = advantages + values[:-1]

        # Normalize only the advantages, keeping returns at their raw scale.
        # The numel() > 1 guard avoids NaN: std() of a single element is NaN.
        if advantages.numel() > 1 and advantages.std() > 0:
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)

        return advantages, returns

    def update(self):
        """Run k_epochs of clipped-surrogate PPO over the buffered rollout.

        Returns a dict of averaged loss components for logging.
        """
        # Nothing to learn from an empty buffer
        if len(self.memory.states) == 0:
            return {
                'total_loss': 0.0,
                'actor_surr_loss1': 0.0,
                'actor_surr_loss2': 0.0,
                'critic_value_loss': 0.0,
                'total_entropy': 0.0
            }

        old_states = torch.squeeze(torch.stack(self.memory.states).to(device), 1).detach()
        old_actions = torch.squeeze(torch.stack(self.memory.actions).to(device), 1).detach()
        old_logprobs = torch.squeeze(torch.stack(self.memory.logprobs).to(device), 1).detach()
        rewards = torch.tensor(self.memory.rewards, dtype=torch.float32).to(device)
        masks = 1.0 - torch.tensor(self.memory.is_terminals, dtype=torch.float32).to(device)

        # Evaluate state values for the GAE computation
        with torch.no_grad():
            _, _, values = self.policy.evaluate(old_states, old_actions)
            # view(-1) rather than squeeze(): a 1-sample batch must stay 1-D
            # so len(values) below keeps working.
            values = values.view(-1)

        # Append a bootstrap value of 0 for the final state
        values = torch.cat((values, torch.zeros(1).to(device))) if len(values) > 0 else torch.zeros(1).to(device)

        # Advantages and returns via GAE
        advantages, returns = self.calculate_gae(rewards, values, masks)

        # Debug output
        print(f"调试信息 - 更新步骤:")
        print(f"  样本数: {len(self.memory.states)}")
        print(f"  奖励范围: [{rewards.min():.4f}, {rewards.max():.4f}]")
        print(f"  优势范围: [{advantages.min():.4f}, {advantages.max():.4f}]")
        print(f"  回报范围: [{returns.min():.4f}, {returns.max():.4f}]")

        # Running loss statistics; actor_surr_loss1/2 are the surrogate
        # losses of the two action branches.
        total_loss = 0
        actor_surr_loss1 = 0
        actor_surr_loss2 = 0
        critic_value_loss = 0

        # Optimize the policy network
        for epoch in range(self.k_epochs):
            # Re-evaluate old actions and state values under the current policy
            logprobs1, logprobs2, state_values = self.policy.evaluate(old_states, old_actions)

            # Distributions for the entropy bonus. Deliberately computed WITH
            # gradients: under torch.no_grad() the entropy term would be a
            # constant and contribute nothing to the policy update.
            action_probs1, action_probs2, _ = self.policy(old_states)
            dist1 = Categorical(action_probs1)
            dist2 = Categorical(action_probs2)

            # Probability ratios pi_theta / pi_theta_old
            ratios1 = torch.exp(logprobs1 - old_logprobs[:, 0].detach())
            ratios2 = torch.exp(logprobs2 - old_logprobs[:, 1].detach())

            # Flatten to (N,) so the MSE target shape matches `returns`
            # (squeeze() would collapse a 1-sample batch to 0-D).
            state_values = state_values.view(-1)

            # Clipped surrogate objective per branch
            surr1_1 = ratios1 * advantages
            surr1_2 = ratios2 * advantages
            surr2_1 = torch.clamp(ratios1, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
            surr2_2 = torch.clamp(ratios2, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
            surr1 = torch.min(surr1_1, surr2_1)
            surr2 = torch.min(surr1_2, surr2_2)

            # Per-component losses
            current_actor_surr_loss1 = -surr1.mean()  # PPO surrogate loss, action branch 1
            current_actor_surr_loss2 = -surr2.mean()  # PPO surrogate loss, action branch 2
            current_critic_value_loss = self.MseLoss(state_values, returns)  # value-function loss

            # Entropy bonus (encourages exploration)
            entropy1 = dist1.entropy().mean()
            entropy2 = dist2.entropy().mean()

            # Weighted total loss; the entropy term is subtracted so that
            # maximizing entropy lowers the loss.
            actor_loss = current_actor_surr_loss1 + current_actor_surr_loss2
            loss = (self.actor_weight * actor_loss +
                    self.critic_weight * current_critic_value_loss -
                    self.entropy_weight * (entropy1 + entropy2))

            # Accumulate statistics
            total_loss += loss.item()
            actor_surr_loss1 += current_actor_surr_loss1.item()
            actor_surr_loss2 += current_actor_surr_loss2.item()
            critic_value_loss += current_critic_value_loss.item()

            # Gradient step
            self.optimizer.zero_grad()
            loss.backward()

            # Debug: inspect gradient magnitudes on the first epoch only
            if epoch == 0:
                grad_norms = []
                for name, param in self.policy.named_parameters():
                    if param.grad is not None:
                        grad_norms.append(param.grad.norm().item())
                if grad_norms:
                    print(f"  梯度范数范围: [{min(grad_norms):.4f}, {max(grad_norms):.4f}]")

            # Clip gradients to guard against explosion
            torch.nn.utils.clip_grad_norm_(self.policy.parameters(), max_norm=1.0)
            self.optimizer.step()

        # Sync the rollout policy with the freshly optimized weights
        self.policy_old.load_state_dict(self.policy.state_dict())

        # Average losses over epochs
        avg_total_loss = total_loss / self.k_epochs
        avg_actor_surr_loss1 = actor_surr_loss1 / self.k_epochs
        avg_actor_surr_loss2 = actor_surr_loss2 / self.k_epochs
        avg_critic_value_loss = critic_value_loss / self.k_epochs

        # Entropy of the last epoch (guarded in case k_epochs == 0)
        avg_entropy1 = entropy1.item() if 'entropy1' in locals() else 0
        avg_entropy2 = entropy2.item() if 'entropy2' in locals() else 0
        avg_total_entropy = (avg_entropy1 + avg_entropy2) / 2 if 'entropy1' in locals() else 0

        # Debug: detailed loss breakdown
        print(f"  损失分解（原始值）:")
        print(f"    代理损失1原始值: {-avg_actor_surr_loss1:.4f}")
        print(f"    代理损失2原始值: {-avg_actor_surr_loss2:.4f}")
        print(f"    价值损失原始值: {avg_critic_value_loss:.4f}")
        print(f"    熵正则化项: {self.entropy_weight * avg_total_entropy * 2:.4f}")
        print(f"    总损失原始计算: {avg_total_loss:.4f} = ({self.actor_weight} * ({avg_actor_surr_loss1:.4f} + {avg_actor_surr_loss2:.4f})) + ({self.critic_weight} * {avg_critic_value_loss:.4f}) - ({self.entropy_weight} * {avg_total_entropy * 2:.4f})")

        # Loss component summary
        print(f"  损失组件（平均值）:")
        print(f"    代理损失1: {avg_actor_surr_loss1:.4f}")
        print(f"    代理损失2: {avg_actor_surr_loss2:.4f}")
        print(f"    价值函数损失: {avg_critic_value_loss:.4f}")
        print(f"    总熵: {avg_total_entropy:.4f}")

        return {
            'total_loss': avg_total_loss,
            'actor_surr_loss1': avg_actor_surr_loss1,  # surrogate loss, action branch 1
            'actor_surr_loss2': avg_actor_surr_loss2,  # surrogate loss, action branch 2
            'critic_value_loss': avg_critic_value_loss,  # critic value loss
            'total_entropy': avg_total_entropy  # policy entropy (exploration monitor)
        }

    def store_transition(self, state, action, log_prob, reward, is_terminal):
        """Append one transition to the rollout buffer."""
        self.memory.states.append(state)
        self.memory.actions.append(action)
        self.memory.logprobs.append(log_prob)
        self.memory.rewards.append(reward)
        self.memory.is_terminals.append(is_terminal)

    def clear_memory(self):
        """Empty the rollout buffer; call after every update."""
        self.memory.clear_memory()

    # Persist the trained policy
    def save(self, path):
        """Save the current policy's state dict to `path`."""
        torch.save(self.policy.state_dict(), path)

    def behavior_clone_train(self, expert_data, epochs=5, batch_size=64):
        """
        Pre-train the policy network by behavior cloning on expert data.

        Parameters:
            expert_data: expert trajectories; a list of episodes, each step a
                dict with 'state' and 'action' entries
            epochs: number of training epochs
            batch_size: mini-batch size

        Returns:
            dict: mean training losses
        """
        import torch.optim as optim
        import numpy as np

        # Flatten episodes into tensors
        states = []
        actions = []

        for episode in expert_data:
            for step in episode:
                states.append(step['state'])
                actions.append(step['action'])

        states = torch.tensor(np.array(states), dtype=torch.float32).to(device)
        actions = torch.tensor(np.array(actions), dtype=torch.long).to(device)

        # NLLLoss over log-probabilities: the network already outputs softmax
        # probabilities, so CrossEntropyLoss (which expects raw logits and
        # applies log_softmax internally) would compute the wrong loss here.
        criterion1 = nn.NLLLoss()
        criterion2 = nn.NLLLoss()

        # Training statistics
        total_losses = []
        actor1_losses = []
        actor2_losses = []

        print(f"开始行为克隆训练，共 {epochs} 轮，批次大小 {batch_size}")

        for epoch in range(epochs):
            # Shuffle the data each epoch
            permutation = torch.randperm(states.size()[0])
            states = states[permutation]
            actions = actions[permutation]

            epoch_loss = 0
            epoch_actor1_loss = 0
            epoch_actor2_loss = 0

            # Mini-batch training
            for i in range(0, states.size()[0], batch_size):
                # Slice out one batch
                batch_states = states[i:i+batch_size]
                batch_actions = actions[i:i+batch_size]

                # Forward pass: per-branch action probabilities
                action_probs1, action_probs2, _ = self.policy(batch_states)

                # Cross-entropy via NLL on log-probs; epsilon avoids log(0)
                actor1_loss = criterion1(torch.log(action_probs1 + 1e-10), batch_actions[:, 0])
                actor2_loss = criterion2(torch.log(action_probs2 + 1e-10), batch_actions[:, 1])
                loss = actor1_loss + actor2_loss

                # Backprop and optimize
                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.policy.parameters(), max_norm=1.0)
                self.optimizer.step()

                # Accumulate losses
                epoch_loss += loss.item()
                epoch_actor1_loss += actor1_loss.item()
                epoch_actor2_loss += actor2_loss.item()

            # Epoch-average losses
            avg_loss = epoch_loss / (states.size()[0] / batch_size)
            avg_actor1_loss = epoch_actor1_loss / (states.size()[0] / batch_size)
            avg_actor2_loss = epoch_actor2_loss / (states.size()[0] / batch_size)

            total_losses.append(avg_loss)
            actor1_losses.append(avg_actor1_loss)
            actor2_losses.append(avg_actor2_loss)

            print(f"BC Epoch {epoch+1}/{epochs} - 总损失: {avg_loss:.4f}, Actor1损失: {avg_actor1_loss:.4f}, Actor2损失: {avg_actor2_loss:.4f}")

        # Sync the rollout policy
        self.policy_old.load_state_dict(self.policy.state_dict())

        return {
            'total_loss': np.mean(total_losses),
            'actor1_loss': np.mean(actor1_losses),
            'actor2_loss': np.mean(actor2_losses)
        }

class Memory:
    """Rollout buffer: parallel lists holding one batch of on-policy
    transitions until the next policy update."""

    def __init__(self):
        self.states = []
        self.actions = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []

    def clear_memory(self):
        """Empty every buffer in place, preserving the list objects."""
        self.actions.clear()
        self.states.clear()
        self.logprobs.clear()
        self.rewards.clear()
        self.is_terminals.clear()