import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical

# --- 1. 定义策略网络 (Actor) ---
class PolicyNetwork(nn.Module):
    """Actor network: maps a state vector to logits over discrete actions."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # One hidden layer of 128 units, then a linear head producing logits.
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc_actor = nn.Linear(128, action_dim)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        return self.fc_actor(hidden)

    def select_action(self, state):
        """Sample an action for a single state (1-D numpy array).

        Returns (action_index, log_prob); the log-prob tensor keeps its
        autograd graph so it can drive the policy-gradient update later.
        """
        state_tensor = torch.from_numpy(state).float().unsqueeze(0)
        probs = F.softmax(self.forward(state_tensor), dim=-1)
        dist = Categorical(probs)
        sampled = dist.sample()
        return sampled.item(), dist.log_prob(sampled)

# --- 2. 定义值函数网络 (Critic) ---
class ValueNetwork(nn.Module):
    """Critic network: estimates the state value V(s) used as a baseline."""

    def __init__(self, state_dim):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc_critic = nn.Linear(128, 1)  # scalar value head

    def forward(self, x):
        # relu hidden layer feeding a single-output linear head: V(s).
        return self.fc_critic(F.relu(self.fc1(x)))

# --- 3. REINFORCE with Baseline 算法主体 ---
def reinforce_with_baseline(env_name='CartPole-v1', actor_lr=0.005, critic_lr=0.01, gamma=0.99, num_episodes=2000, log_interval=100):
    """Train a discrete-action policy with REINFORCE and a learned value baseline.

    Args:
        env_name: Gymnasium environment id with a discrete action space.
        actor_lr: Learning rate for the policy (actor) network.
        critic_lr: Learning rate for the value (critic) network.
        gamma: Discount factor used when computing returns G_t.
        num_episodes: Maximum number of training episodes.
        log_interval: Number of episodes between progress printouts.

    Returns:
        List of total (undiscounted) episode rewards, one per episode run.
    """
    env = gym.make(env_name)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n

    actor_net = PolicyNetwork(state_dim, action_dim)
    critic_net = ValueNetwork(state_dim)

    actor_optimizer = optim.Adam(actor_net.parameters(), lr=actor_lr)
    critic_optimizer = optim.Adam(critic_net.parameters(), lr=critic_lr)

    episode_rewards_history = []

    for i_episode in range(num_episodes):
        # --- a. Roll out one complete episode ---
        state, _ = env.reset()
        saved_log_probs = []
        rewards = []
        states_for_critic = []  # states visited, used later as critic inputs

        done = False
        truncated = False
        current_episode_reward = 0

        while not (done or truncated):
            action, log_prob = actor_net.select_action(state)
            next_state, reward, done, truncated, _ = env.step(action)

            states_for_critic.append(state)
            saved_log_probs.append(log_prob)
            rewards.append(reward)
            current_episode_reward += reward
            state = next_state

        episode_rewards_history.append(current_episode_reward)

        # --- b. Discounted returns G_t, accumulated backwards in O(T) ---
        returns = []
        discounted_sum_reward = 0
        for r in reversed(rewards):
            discounted_sum_reward = r + gamma * discounted_sum_reward
            returns.insert(0, discounted_sum_reward)

        returns = torch.tensor(returns, dtype=torch.float32)

        # --- c. Advantage A_t = G_t - V(s_t) ---
        states_tensor = torch.tensor(np.array(states_for_critic), dtype=torch.float32)
        # FIX: squeeze only the last dim. A bare .squeeze() would collapse a
        # length-1 episode's (1, 1) critic output to a 0-d tensor, producing a
        # shape mismatch against `returns` in mse_loss and the advantage.
        state_values = critic_net(states_tensor).squeeze(-1)  # V(s_t), shape (T,)
        advantages = returns - state_values

        # --- d. Critic update: regress V(s_t) toward the observed G_t ---
        critic_optimizer.zero_grad()
        value_loss = F.mse_loss(state_values, returns)
        value_loss.backward()
        critic_optimizer.step()

        # --- e. Actor update: policy gradient weighted by the advantage.
        # detach() stops the actor loss from back-propagating into the critic.
        actor_optimizer.zero_grad()
        actor_loss = -(torch.cat(saved_log_probs) * advantages.detach()).sum()
        actor_loss.backward()
        actor_optimizer.step()

        if (i_episode + 1) % log_interval == 0:
            avg_reward = np.mean(episode_rewards_history[-log_interval:])
            print(f'Episode {i_episode+1}/{num_episodes}\tAverage Reward (last {log_interval}): {avg_reward:.2f}\tActor Loss: {actor_loss.item():.2f}\tCritic Loss: {value_loss.item():.2f}')
            # CartPole-v1 is considered solved at an average return of 475.
            if env_name == 'CartPole-v1' and avg_reward >= 475.0:
                print(f"Solved CartPole-v1 in {i_episode+1} episodes!")
                break

    env.close()
    return episode_rewards_history

# --- 4. 运行算法 ---
if __name__ == '__main__':
    # Train and collect the per-episode reward curve.
    history = reinforce_with_baseline(
        env_name='CartPole-v1',
        actor_lr=0.001,   # learning rates may need tuning
        critic_lr=0.005,  # the critic usually tolerates a slightly larger lr
        gamma=0.99,
        num_episodes=3000,  # may need more episodes to converge
        log_interval=100
    )

    import matplotlib.pyplot as plt

    plt.plot(history)
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    plt.title('REINFORCE with Baseline on CartPole-v1')

    # Overlay a trailing moving average to smooth the noisy episode returns.
    window_size = 100
    smoothed = []
    for i in range(len(history)):
        if i >= window_size:
            smoothed.append(np.mean(history[i - window_size:i]))
        else:
            smoothed.append(np.mean(history[0:i + 1]))
    plt.plot(smoothed, color='red', linestyle='--', label=f'Moving Average ({window_size} episodes)')
    plt.legend()
    plt.show()