import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import gymnasium as gym
from torch.utils.tensorboard import SummaryWriter

# Device selection: training runs on CPU by default.
device = torch.device("cpu")
# Uncomment to prefer a GPU when one is available:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Policy/value network
class ActorCritic(nn.Module):
    """Shared-trunk actor-critic network for discrete action spaces.

    A three-layer ReLU MLP extracts features; an actor head emits a
    softmax distribution over actions and a critic head emits a scalar
    state-value estimate.
    """

    def __init__(self, input_dim, n_actions, hidden_dim=64):
        super(ActorCritic, self).__init__()

        # Shared feature trunk: three Linear + ReLU stages.
        self.features = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )

        # Actor head (policy): probabilities over the n_actions actions.
        self.actor = nn.Sequential(
            nn.Linear(hidden_dim, n_actions),
            nn.Softmax(dim=-1)
        )

        # Critic head (value function): one scalar per state.
        self.critic = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1)
        )

        # Orthogonal initialization on every linear layer.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Orthogonal weights (gain sqrt(2), the usual choice for ReLU
        # stacks) and zero biases on all Linear layers.
        if not isinstance(module, nn.Linear):
            return
        nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
        if module.bias is not None:
            module.bias.data.zero_()

    def forward(self, x):
        """Return (action_probs, state_value) for input `x`."""
        hidden = self.features(x)
        return self.actor(hidden), self.critic(hidden)

    def get_action(self, state, action=None):
        """Sample an action for `state` (or score a provided `action`).

        Returns a tuple (action_int, log_prob, entropy, value).
        """
        obs = torch.FloatTensor(state).to(device)
        probs, value = self.forward(obs)
        dist = Categorical(probs)

        chosen = dist.sample() if action is None else action

        return chosen.item(), dist.log_prob(chosen), dist.entropy(), value

# Rollout buffer
class PPOMemory:
    """Stores one rollout of transitions and serves shuffled minibatches."""

    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.clear()

    def store(self, state, action, prob, val, reward, done):
        """Append one transition; `prob` and `val` are 0-dim tensors."""
        self.states.append(state)
        self.actions.append(action)
        # Keep plain Python scalars instead of tensors so the buffer
        # holds no references into the autograd graph.
        self.probs.append(prob.item())
        self.vals.append(val.item())
        self.rewards.append(reward)
        self.dones.append(done)

    def clear(self):
        """Drop all stored transitions."""
        self.states, self.actions = [], []
        self.probs, self.vals = [], []
        self.rewards, self.dones = [], []

    def get_batches(self):
        """Return the rollout as numpy arrays plus shuffled index batches."""
        count = len(self.states)
        order = np.arange(count, dtype=np.int64)
        np.random.shuffle(order)
        batches = [order[start:start + self.batch_size]
                   for start in range(0, count, self.batch_size)]

        return (np.array(self.states), np.array(self.actions),
                np.array(self.probs), np.array(self.vals),
                np.array(self.rewards), np.array(self.dones), batches)

# Linear learning-rate decay
class LinearLRSchedule:
    """Linearly anneals a learning rate from `start_lr` to `end_lr`.

    The rate decays over `max_steps` calls to step() and then stays
    clamped at `end_lr`.
    """

    def __init__(self, start_lr, end_lr, max_steps):
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.max_steps = max_steps
        self.current_step = 0

    def get_lr(self):
        """Return the learning rate for the current step."""
        # Guard against division by zero when max_steps <= 0: treat the
        # schedule as already fully decayed.
        if self.max_steps <= 0:
            return self.end_lr
        progress = min(1.0, self.current_step / self.max_steps)
        return self.start_lr - progress * (self.start_lr - self.end_lr)

    def step(self):
        """Advance the schedule by one step and return the new rate."""
        self.current_step += 1
        return self.get_lr()

# PPO Agent
class PPOAgent:
    """Proximal Policy Optimization (clipped-objective) agent.

    Wraps an ActorCritic network, a rollout buffer (PPOMemory) and an
    optional linear learning-rate schedule. Built for gymnasium-style
    environments with a 1-D Box observation space and a Discrete action
    space (e.g. CartPole-v1). Training metrics go to TensorBoard under
    'runs/ppo_cartpole'.
    """

    def __init__(
            self,
            env,
            hidden_dim=64,
            lr=0.0003,
            gamma=0.99,
            gae_lambda=0.95,
            policy_clip=0.2,
            batch_size=64,
            n_epochs=10,
            entropy_coef=0.01,
            value_coef=0.5,
            max_grad_norm=0.5,
            normalize_advantage=True,
            use_lr_decay=True,
            end_lr=1e-5,
            max_steps=1000000
        ):
        """Build networks, optimizer, memory and logging.

        Args:
            env: gymnasium-style environment (reset()/step() 5-tuple API).
            hidden_dim: width of the ActorCritic hidden layers.
            lr: initial Adam learning rate.
            gamma: discount factor.
            gae_lambda: GAE(lambda) smoothing parameter.
            policy_clip: PPO clipping epsilon.
            batch_size: minibatch size for gradient updates.
            n_epochs: optimization epochs per collected rollout.
            entropy_coef: entropy-bonus weight in the total loss.
            value_coef: value-loss weight in the total loss.
            max_grad_norm: global gradient-clipping threshold.
            normalize_advantage: standardize advantages per rollout.
            use_lr_decay: enable linear LR decay toward `end_lr`.
            end_lr: final learning rate of the decay schedule.
            max_steps: step horizon over which the LR decays.
        """
        self.env = env
        self.gamma = gamma
        self.policy_clip = policy_clip
        self.n_epochs = n_epochs
        self.gae_lambda = gae_lambda
        self.entropy_coef = entropy_coef
        self.value_coef = value_coef
        self.max_grad_norm = max_grad_norm
        self.normalize_advantage = normalize_advantage
        self.use_lr_decay = use_lr_decay

        self.input_dim = env.observation_space.shape[0]
        self.n_actions = env.action_space.n

        self.actor_critic = ActorCritic(self.input_dim, self.n_actions, hidden_dim).to(device)
        self.optimizer = optim.Adam(self.actor_critic.parameters(), lr=lr)

        if use_lr_decay:
            self.lr_scheduler = LinearLRSchedule(lr, end_lr, max_steps)

        self.memory = PPOMemory(batch_size)
        self.total_steps = 0
        # Separate counter for gradient updates: the loss scalars below
        # were previously all logged at the same (stale) total_steps
        # value within one learn() call, overwriting each other.
        self.n_updates = 0
        self.writer = SummaryWriter('runs/ppo_cartpole')

    def store_transition(self, state, action, prob, val, reward, done):
        """Push one environment transition into the rollout buffer."""
        self.memory.store(state, action, prob, val, reward, done)

    def update_lr(self):
        """Advance the LR schedule (if enabled) and return the current LR."""
        if self.use_lr_decay:
            new_lr = self.lr_scheduler.step()
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = new_lr
            return new_lr
        return self.optimizer.param_groups[0]['lr']

    def learn(self):
        """Run one PPO optimization phase over the buffered rollout."""
        states, actions, old_probs, vals, rewards, dones, batches = self.memory.get_batches()

        # GAE(lambda) advantages via the standard O(n) backward recursion.
        # The previous O(n^2) double loop never masked the discount
        # accumulator with (1 - done), so advantage estimates leaked
        # across episode boundaries. The nonterminal mask below stops
        # both the value bootstrap and the recursion at terminal steps.
        # The last step keeps advantage 0 since no successor value is
        # available to bootstrap from (same convention as before).
        n_steps = len(rewards)
        advantage = np.zeros(n_steps, dtype=np.float32)
        last_gae = 0.0
        for t in reversed(range(n_steps - 1)):
            nonterminal = 1.0 - dones[t]
            delta = rewards[t] + self.gamma * vals[t + 1] * nonterminal - vals[t]
            last_gae = delta + self.gamma * self.gae_lambda * nonterminal * last_gae
            advantage[t] = last_gae

        advantage = torch.tensor(advantage, dtype=torch.float32).to(device)
        values = torch.tensor(vals, dtype=torch.float32).to(device)

        # Value targets must come from the *raw* advantages: compute them
        # before normalization so the critic regresses true returns
        # (previously they were built from normalized advantages).
        returns = advantage + values

        # Standardize advantages for a better-conditioned policy loss.
        if self.normalize_advantage and len(advantage) > 1:
            advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-8)

        for _ in range(self.n_epochs):
            for batch in batches:
                states_batch = torch.tensor(states[batch], dtype=torch.float32).to(device)
                old_probs_batch = torch.tensor(old_probs[batch], dtype=torch.float32).to(device)
                actions_batch = torch.tensor(actions[batch], dtype=torch.int64).to(device)

                # Re-evaluate policy and value at the stored states.
                action_probs, critic_value = self.actor_critic(states_batch)
                # squeeze(-1), not squeeze(): a batch of size 1 must keep
                # its batch dimension to match returns[batch].
                critic_value = critic_value.squeeze(-1)

                dist = Categorical(action_probs)
                new_probs = dist.log_prob(actions_batch)
                entropy = dist.entropy().mean()

                # Clipped-surrogate actor loss.
                prob_ratio = (new_probs - old_probs_batch).exp()
                weighted_probs = advantage[batch] * prob_ratio
                clipped_probs = torch.clamp(prob_ratio, 1 - self.policy_clip, 1 + self.policy_clip) * advantage[batch]
                actor_loss = -torch.min(weighted_probs, clipped_probs).mean()

                # Critic loss: regression toward the GAE returns.
                critic_loss = nn.MSELoss()(critic_value, returns[batch])

                # Total loss with entropy bonus.
                total_loss = actor_loss + self.value_coef * critic_loss - self.entropy_coef * entropy

                self.optimizer.zero_grad()
                total_loss.backward()
                # Clip gradients to guard against exploding updates.
                nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()

                # Log each update at its own step so scalars don't collide.
                self.n_updates += 1
                self.writer.add_scalar('Loss/actor', actor_loss.item(), self.n_updates)
                self.writer.add_scalar('Loss/critic', critic_loss.item(), self.n_updates)
                self.writer.add_scalar('Loss/entropy', entropy.item(), self.n_updates)

        self.memory.clear()

    def save_model(self, path):
        """Serialize the network weights to `path`."""
        torch.save(self.actor_critic.state_dict(), path)

    def load_model(self, path):
        """Load network weights from `path`.

        map_location keeps checkpoints portable between CPU and GPU
        (loading a CUDA-saved file on a CPU-only machine would otherwise
        fail).
        """
        self.actor_critic.load_state_dict(torch.load(path, map_location=device))

    def train(self, n_episodes, max_steps_per_episode=1000):
        """Run the training loop for `n_episodes` episodes.

        Saves the best model by 100-episode moving average, a checkpoint
        every 100 episodes, and a final model. Stops early once the
        moving average reaches the CartPole-v1 solved threshold (495).
        """
        best_score = -float('inf')
        scores = []
        avg_scores = []

        for episode in range(n_episodes):
            observation, _ = self.env.reset()
            done = False
            score = 0
            steps = 0

            while not done and steps < max_steps_per_episode:
                action, prob, entropy, val = self.actor_critic.get_action(observation)
                next_observation, reward, terminated, truncated, _ = self.env.step(action)
                done = terminated or truncated

                # Reward shaping to strengthen the stability signal:
                # in CartPole the more upright and still the pole, the better.
                x, x_dot, theta, theta_dot = next_observation
                # The closer theta is to 0, the better (theta usually lies in [-0.5, 0.5]).
                angle_reward = 1.0 - abs(theta) * 2
                # Lower cart/pole velocities indicate a more stable state.
                speed_penalty = -0.1 * (abs(x_dot) + abs(theta_dot))
                modified_reward = reward + 0.1 * angle_reward + 0.05 * speed_penalty

                self.store_transition(observation, action, prob, val, modified_reward, done)
                score += reward  # track the raw reward for evaluation
                steps += 1
                self.total_steps += 1

                # Periodically decay the learning rate.
                if self.use_lr_decay and self.total_steps % 1000 == 0:
                    current_lr = self.update_lr()
                    self.writer.add_scalar('Params/learning_rate', current_lr, self.total_steps)

                # Update once the buffer holds at least one full batch.
                if steps % self.memory.batch_size == 0 or done:
                    if len(self.memory.states) >= self.memory.batch_size:
                        self.learn()

                observation = next_observation

            scores.append(score)
            # Moving average over the most recent 100 episodes.
            recent_scores = scores[-100:] if len(scores) >= 100 else scores
            avg_score = np.mean(recent_scores)
            avg_scores.append(avg_score)

            self.writer.add_scalar('Reward/episode', score, episode)
            self.writer.add_scalar('Reward/average', avg_score, episode)

            # Keep the best model; checkpoint periodically.
            if avg_score > best_score:
                best_score = avg_score
                self.save_model('best_ppo_model.pth')

            if (episode + 1) % 100 == 0:
                self.save_model(f'ppo_model_ep{episode+1}.pth')
                print(f"模型已保存: ppo_model_ep{episode+1}.pth")

            print(f'Episode: {episode} | Score: {score:.2f} | Avg Score: {avg_score:.2f} | Steps: {steps}')

            # Early stop once the environment counts as solved.
            if avg_score >= 495 and len(recent_scores) >= 100:
                print(f"环境在{episode}回合后被认为已解决，平均得分: {avg_score:.2f}")
                self.save_model('solved_ppo_model.pth')
                break

        # Save the model from the final training state.
        self.save_model('final_ppo_model.pth')
        print("训练完成，已保存最终模型: final_ppo_model.pth")

    def evaluate(self, episodes=10, render=True):
        """Roll out `episodes` episodes and return the mean raw score.

        Note: actions are sampled from the policy distribution
        (get_action draws via Categorical.sample), not taken greedily.
        """
        total_reward = 0

        for episode in range(episodes):
            observation, _ = self.env.reset()
            done = False
            ep_reward = 0
            steps = 0

            while not done:
                if render:
                    self.env.render()

                action, _, _, _ = self.actor_critic.get_action(observation)
                observation, reward, terminated, truncated, _ = self.env.step(action)
                done = terminated or truncated
                ep_reward += reward
                steps += 1

            total_reward += ep_reward
            print(f'Episode {episode}: 得分 = {ep_reward:.2f}, 步数 = {steps}')

        avg_reward = total_reward / episodes
        print(f'在{episodes}回合的平均得分: {avg_reward:.2f}')
        return avg_reward

if __name__ == '__main__':
    # Create the CartPole environment
    env = gym.make('CartPole-v1')
    
    # Hyperparameters
    hidden_dim = 256  # larger network
    lr = 0.0005
    gamma = 0.99
    gae_lambda = 0.95
    policy_clip = 0.2
    batch_size = 128  # larger batch size
    n_epochs = 4  # fewer epochs to limit overfitting
    n_episodes = 1000
    entropy_coef = 0.01
    value_coef = 0.5
    max_grad_norm = 0.5
    normalize_advantage = True
    use_lr_decay = True
    end_lr = 1e-5
    max_steps = 200000
    
    # Create and train the PPO agent
    agent = PPOAgent(
        env=env,
        hidden_dim=hidden_dim,
        lr=lr,
        gamma=gamma,
        gae_lambda=gae_lambda,
        policy_clip=policy_clip,
        batch_size=batch_size,
        n_epochs=n_epochs,
        entropy_coef=entropy_coef,
        value_coef=value_coef,
        max_grad_norm=max_grad_norm,
        normalize_advantage=normalize_advantage,
        use_lr_decay=use_lr_decay,
        end_lr=end_lr,
        max_steps=max_steps
    )
    
    # Train the agent
    agent.train(n_episodes=n_episodes)
    
    # Evaluate the agent on a fresh, rendering environment using the
    # best checkpoint saved during training.
    # NOTE(review): evaluation samples actions from the policy
    # distribution (get_action uses dist.sample()), not a greedy argmax.
    env_eval = gym.make('CartPole-v1', render_mode='human')
    agent_eval = PPOAgent(env=env_eval, hidden_dim=hidden_dim)
    agent_eval.load_model('best_ppo_model.pth')
    agent_eval.evaluate(episodes=5)
    
    # Close both environments
    env.close()
    env_eval.close()
