from collections import deque

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical

# Tuned hyperparameters for stable training on a vehicular-network
# computation-offloading task.
LR_ACTOR = 1e-4      # lower actor learning rate for training stability
LR_CRITIC = 3e-4     # moderate critic learning rate
GAMMA = 0.95         # discount factor; weighs long-term reward more heavily
LAMBDA = 0.95        # GAE lambda parameter
NUM_EPOCH = 10       # PPO epochs per update, to reuse collected experience
EPSILON_CLIP = 0.2   # PPO surrogate clipping parameter
BATCH_SIZE = 128      # larger minibatches for more stable gradient estimates
MEMORY_SIZE = 4096   # replay buffer capacity
UPDATE_FREQUENCY = 64 # update only after this many transitions are collected

# Additional parameters for training stability
GRAD_CLIP = 0.5      # gradient-norm clipping threshold
ENTROPY_COEF = 0.02  # entropy regularization coefficient (encourages exploration)
VALUE_LOSS_COEF = 0.5 # weight of the value loss
TARGET_KL = 0.01     # target KL divergence used for early stopping

# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Actor(nn.Module):
    """Policy network: a 3-hidden-layer MLP mapping a state vector to a
    categorical (softmax) distribution over discrete actions."""

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        """
        Args:
            state_dim: dimensionality of the input state vector.
            action_dim: number of discrete actions.
            hidden_dim: width of each hidden layer.
        """
        super(Actor, self).__init__()
        self.action_dim = action_dim  # used to size the one-hot output in select_action
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.fc4 = nn.Linear(hidden_dim, action_dim)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()  # kept for compatibility; not used in forward

        self._init_weights()

    def _init_weights(self):
        """Orthogonal init with a small gain so the initial policy is near-uniform."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.orthogonal_(m.weight, gain=0.01)
                nn.init.constant_(m.bias, 0)

    @staticmethod
    def _stabilized(action_probs):
        """Add a tiny epsilon and renormalize so no action probability is exactly zero."""
        action_probs = action_probs + 1e-8
        return action_probs / action_probs.sum(dim=-1, keepdim=True)

    def forward(self, x):
        """Return action probabilities of shape (..., action_dim)."""
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.relu(self.fc3(x))
        action_logits = self.fc4(x)
        action_probs = torch.softmax(action_logits, dim=-1)
        return action_probs

    def get_action_and_log_prob(self, state):
        """Sample an action for PPO training.

        Returns:
            Tuple (action, log_prob, entropy) as tensors with gradients attached.
        """
        action_probs = self._stabilized(self.forward(state))

        dist = Categorical(action_probs)
        action = dist.sample()
        log_prob = dist.log_prob(action)
        return action, log_prob, dist.entropy()

    def select_action(self, state):
        """Sample an action for inference and return it as a one-hot float list.

        BUG FIX: the one-hot vector length was hard-coded to 6; it now follows
        ``action_dim``, so the class works for any action-space size.
        """
        with torch.no_grad():
            action_probs = self._stabilized(self.forward(state))

            dist = Categorical(action_probs)
            action_discrete_index = dist.sample()
            action_discrete = [0.0] * self.action_dim
            action_discrete[action_discrete_index.item()] = 1.0
            return action_discrete


class Critic(nn.Module):
    """Value network: estimates the scalar state value V(s) with a 2-hidden-layer MLP."""

    def __init__(self, state_dim, hidden_dim=256):
        """
        Args:
            state_dim: dimensionality of the input state vector.
            hidden_dim: width of each hidden layer.
        """
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, 1)
        self.relu = nn.ReLU()

        self._init_weights()

    def _init_weights(self):
        """Orthogonal weight init (gain 1.0) with zero biases on every linear layer."""
        for layer in (self.fc1, self.fc2, self.fc3):
            nn.init.orthogonal_(layer.weight, gain=1.0)
            nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """Return the state-value estimate with shape (..., 1)."""
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)


class ReplayMemory:
    """Fixed-capacity rollout buffer for PPO transitions.

    Each transition field lives in its own ``collections.deque`` with
    ``maxlen=capacity``, so evicting the oldest transition once the buffer is
    full is O(1); the previous list-based ``pop(0)`` eviction was O(n) per
    field on every insertion.
    """

    def __init__(self, capacity=None):
        """
        Args:
            capacity: maximum number of stored transitions; defaults to the
                module-level MEMORY_SIZE when omitted or None.
        """
        if capacity is None:
            capacity = MEMORY_SIZE
        self.capacity = capacity
        # Parallel deques, kept in lockstep by add_memo / clear_memo.
        self.states = deque(maxlen=capacity)
        self.actions = deque(maxlen=capacity)
        self.rewards = deque(maxlen=capacity)
        self.values = deque(maxlen=capacity)
        self.log_probs = deque(maxlen=capacity)
        self.next_states = deque(maxlen=capacity)
        self.dones = deque(maxlen=capacity)

    def add_memo(self, state, action, reward, value, log_prob, next_state, done):
        """Append one transition (current state plus successor state).

        When the buffer is full, the deques' maxlen silently drops the oldest
        entry of every field, preserving alignment across fields.
        """
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.values.append(value)
        self.log_probs.append(log_prob)
        self.next_states.append(next_state)
        self.dones.append(done)

    def get_all_data(self):
        """Return every stored field as a numpy array, in chronological order."""
        return (np.array(list(self.states)),
                np.array(list(self.actions)),
                np.array(list(self.rewards)),
                np.array(list(self.values)),
                np.array(list(self.log_probs)),
                np.array(list(self.next_states)),
                np.array(list(self.dones)))

    def clear_memo(self):
        """Drop all stored transitions."""
        self.states.clear()
        self.actions.clear()
        self.rewards.clear()
        self.values.clear()
        self.log_probs.clear()
        self.next_states.clear()
        self.dones.clear()

    def __len__(self):
        """Number of currently stored transitions."""
        return len(self.states)


class PPOAgent:
    """Proximal Policy Optimization agent.

    Combines a categorical Actor and a value Critic with:
      * GAE(lambda) advantage estimation,
      * clipped-surrogate policy loss plus an entropy bonus,
      * several epochs of shuffled minibatch updates per rollout,
      * gradient-norm clipping and approximate-KL early stopping,
      * periodic exponential learning-rate decay.
    """

    def __init__(self, state_dim, action_dim, has_continuous_action_space=False):
        """
        Args:
            state_dim: dimensionality of the environment state vector.
            action_dim: number of discrete actions.
            has_continuous_action_space: stored but never read here — the
                actor is always categorical in this implementation.
        """
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.has_continuous_action_space = has_continuous_action_space
        
        # Hyperparameters copied from the module-level constants.
        self.lr_actor = LR_ACTOR
        self.lr_critic = LR_CRITIC
        self.gamma = GAMMA
        self.lambda_gae = LAMBDA
        self.num_epochs = NUM_EPOCH
        self.eps_clip = EPSILON_CLIP
        self.batch_size = BATCH_SIZE
        self.update_frequency = UPDATE_FREQUENCY
        
        # Policy and value networks.
        self.actor = Actor(state_dim, action_dim).to(device)
        self.critic = Critic(state_dim).to(device)
        
        # AdamW optimizers (decoupled weight decay for regularization).
        self.actor_optimizer = optim.AdamW(self.actor.parameters(), lr=self.lr_actor, weight_decay=1e-4)
        self.critic_optimizer = optim.AdamW(self.critic.parameters(), lr=self.lr_critic, weight_decay=1e-4)
        
        # Gentle exponential LR decay; stepped only every 200 updates (see update()).
        self.actor_scheduler = optim.lr_scheduler.ExponentialLR(self.actor_optimizer, gamma=0.995)
        self.critic_scheduler = optim.lr_scheduler.ExponentialLR(self.critic_optimizer, gamma=0.995)
        
        # Rollout buffer; cleared after every update (on-policy training).
        self.replay_buffer = ReplayMemory()
        
        # Counters: update_count drives LR scheduling; step_count is never
        # incremented in this class (presumably maintained by the caller).
        self.update_count = 0
        self.step_count = 0

    def get_action(self, state):
        """Sample a one-hot action and the critic's value estimate (inference path)."""
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        
        # One-hot action list sampled from the actor (runs under no_grad).
        action_discrete = self.actor.select_action(state)
        
        # State-value estimate from the critic.
        value = self.critic(state)
        
        # NOTE(review): value has shape (1, 1), so [0] returns a length-1
        # numpy array rather than a Python scalar — confirm callers expect this.
        return action_discrete, value.detach().cpu().numpy()[0]

    def get_action_with_log_prob(self, state):
        """Sample an action with its log-probability, value and entropy (training path).

        Returns:
            Tuple (action index, log-probability, value estimate, entropy),
            all converted to Python scalars.
        """
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        
        # Action index and its log-probability under the current policy.
        action, log_prob, entropy = self.actor.get_action_and_log_prob(state)
        
        # State-value estimate from the critic.
        value = self.critic(state)
        
        return action.item(), log_prob.item(), value.item(), entropy.item()

    def compute_gae(self, rewards, values, next_values, dones):
        """Compute Generalized Advantage Estimation, GAE(gamma, lambda).

        Args:
            rewards: per-step rewards.
            values: critic estimates V(s_t) stored at collection time.
            next_values: critic estimates V(s_{t+1}); only the final entry is
                actually used for bootstrapping the last step.
            dones: episode-termination flags; they cut both bootstrapping and
                the recursive GAE accumulation at episode boundaries.

        Returns:
            List of advantages, one per step, in chronological order.
        """
        advantages = []
        gae = 0
        
        # Defensively align next_values with rewards (truncate, or pad with the
        # last available value) in case the caller passed a mismatched length.
        if len(next_values) != len(rewards):
            if len(next_values) > len(rewards):
                next_values = next_values[:len(rewards)]
            else:
                last_value = next_values[-1] if len(next_values) > 0 else 0
                next_values = np.concatenate([next_values, [last_value] * (len(rewards) - len(next_values))])
        
        # Backward recursion: delta_t = r_t + gamma*V(s_{t+1}) - V(s_t),
        # gae_t = delta_t + gamma*lambda*gae_{t+1}, both masked by done flags.
        for i in reversed(range(len(rewards))):
            if i == len(rewards) - 1:
                # Last step: bootstrap from the stored next-state value (zero if terminal).
                next_value = next_values[i] if not dones[i] else 0
            else:
                # Intermediate steps: V(s_{t+1}) is the next stored value; if
                # dones[i] is set, the (1 - dones[i]) factor below zeroes it out.
                next_value = values[i + 1]
            
            delta = rewards[i] + self.gamma * next_value * (1 - dones[i]) - values[i]
            gae = delta + self.gamma * self.lambda_gae * (1 - dones[i]) * gae
            advantages.insert(0, gae)
        
        return advantages

    def should_update(self):
        """Return True once enough transitions have been collected to train on."""
        return len(self.replay_buffer) >= self.update_frequency

    def update(self):
        """Run one PPO update over the whole buffer, then clear the buffer."""
        # Train only once enough experience has been collected.
        if not self.should_update():
            return
        
        # Pull the full rollout out of the buffer as numpy arrays.
        states, actions, rewards, values, log_probs, next_states, dones = self.replay_buffer.get_all_data()
        
        if len(states) == 0:
            return
        
        # Bootstrap values for the successor states (no gradient needed).
        next_states_tensor = torch.FloatTensor(next_states).to(device)
        with torch.no_grad():
            next_values = self.critic(next_states_tensor).squeeze().cpu().numpy()
            # squeeze() collapses a single-sample batch to 0-dim; restore 1-D.
            if next_values.ndim == 0:
                next_values = np.array([next_values])
        
        # Advantages via GAE; returns = advantage + value, computed BEFORE the
        # advantages are normalized so the value-regression targets stay unbiased.
        advantages = self.compute_gae(rewards, values, next_values, dones)
        returns = [adv + val for adv, val in zip(advantages, values)]
        
        # Normalize advantages for a better-conditioned policy gradient.
        advantages = np.array(advantages)
        if advantages.std() > 1e-8:
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        
        # Move the training data onto the device as tensors.
        states_tensor = torch.FloatTensor(states).to(device)
        actions_tensor = torch.LongTensor(actions).to(device)
        old_log_probs_tensor = torch.FloatTensor(log_probs).to(device)
        advantages_tensor = torch.FloatTensor(advantages).to(device)
        returns_tensor = torch.FloatTensor(returns).to(device)
        
        # PPO optimization: several epochs of shuffled minibatches.
        for epoch in range(self.num_epochs):
            # Fresh random permutation of the rollout for this epoch.
            indices = torch.randperm(len(states))
            
            # Accumulate the approximate KL divergence for early stopping.
            total_kl = 0
            num_batches = 0
            
            for start in range(0, len(states), self.batch_size):
                end = min(start + self.batch_size, len(states))
                batch_indices = indices[start:end]
                
                batch_states = states_tensor[batch_indices]
                batch_actions = actions_tensor[batch_indices]
                batch_old_log_probs = old_log_probs_tensor[batch_indices]
                batch_advantages = advantages_tensor[batch_indices]
                batch_returns = returns_tensor[batch_indices]
                
                # Re-evaluate the stored actions under the current policy; the
                # small epsilon + renormalization guards against zero probabilities.
                action_probs = self.actor(batch_states)
                action_probs = action_probs + 1e-8
                action_probs = action_probs / action_probs.sum(dim=-1, keepdim=True)
                
                dist = Categorical(action_probs)
                new_log_probs = dist.log_prob(batch_actions)
                entropy = dist.entropy()
                
                # Importance ratio pi_new / pi_old.
                ratio = torch.exp(new_log_probs - batch_old_log_probs)
                
                # First-order approximation of the KL divergence old->new.
                kl = (batch_old_log_probs - new_log_probs).mean()
                total_kl += kl.item()
                num_batches += 1
                
                # Clipped surrogate objective plus entropy bonus.
                surr1 = ratio * batch_advantages
                surr2 = torch.clamp(ratio, 1 - self.eps_clip, 1 + self.eps_clip) * batch_advantages
                actor_loss = -torch.min(surr1, surr2).mean() - ENTROPY_COEF * entropy.mean()
                
                # Value regression toward the GAE returns.
                # NOTE(review): .squeeze() yields a 0-dim tensor when the batch
                # has a single sample, making MSELoss broadcast — squeeze(-1)
                # would be safer; confirm batch sizes before changing.
                current_values = self.critic(batch_states).squeeze()
                value_loss = nn.MSELoss()(current_values, batch_returns) * VALUE_LOSS_COEF
                
                # Actor step with gradient-norm clipping.
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), GRAD_CLIP)
                self.actor_optimizer.step()
                
                # Critic step with gradient-norm clipping.
                self.critic_optimizer.zero_grad()
                value_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), GRAD_CLIP)
                self.critic_optimizer.step()
            
            # Early stopping: abandon the remaining epochs once the epoch's
            # average approximate KL exceeds the target.
            avg_kl = total_kl / max(num_batches, 1)
            if avg_kl > TARGET_KL:
                break
        
        # Decay the learning rates once every 200 updates.
        self.update_count += 1
        if self.update_count % 200 == 0:
            self.actor_scheduler.step()
            self.critic_scheduler.step()
        
        # On-policy algorithm: discard the experience after training on it.
        self.replay_buffer.clear_memo()

    def save_policy(self):
        """Persist the actor's weights to disk (critic and optimizers are not saved)."""
        torch.save(self.actor.state_dict(), f"ppo_policy_optimized.pth")