# REINFORCE算法
# 状态连续：如Box([-1.2  -0.07], [0.6  0.07], (2,), float32) numpy.ndarray
# 动作连续：如Box(-1.0, 1.0, (4,), float32) numpy.ndarray
import os
import torch
import numpy as np
import torch.nn as nn
from tqdm import tqdm
import gymnasium as gym
import matplotlib.pyplot as plt
from config import (GAME_NAME,GAME_UPPER_STEP,HIDDEN_DIM,DEVICE,LEARNING_RATE,DISCOUNT_FACTOR,NUM_EPISODES,REWARD_SCALE)

class PolicyNet(nn.Module):
    """Gaussian policy network for continuous action spaces.

    Maps a batch of states to the mean/std of a diagonal Gaussian over an
    unbounded latent action; `sample_action` squashes the latent with tanh
    and rescales it into [action_low, action_high].
    """
    def __init__(self,
        state_dim: int,
        hidden_dim: int,
        action_dim: int,
        action_low: np.ndarray,
        action_high: np.ndarray
    ):
        super().__init__()
        # Buffers: no gradient, but saved/loaded with state_dict and moved by .to(device).
        self.register_buffer('action_low', torch.tensor(action_low, dtype=torch.float32))
        self.register_buffer('action_high', torch.tensor(action_high, dtype=torch.float32))
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.mean_head = nn.Linear(hidden_dim, action_dim)
        # State-independent log standard deviation, learned jointly with the network.
        self.log_std = nn.Parameter(torch.zeros(action_dim))

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (mean [B, action_dim], std [action_dim]) of the latent Gaussian."""
        features = self.net(x)                       # [B, hidden_dim]
        mean = self.mean_head(features)              # [B, action_dim], unbounded
        # Clamp log_std for numerical stability before exponentiating.
        std = torch.exp(self.log_std.clamp(-20, 2))  # [action_dim]
        return mean, std

    def sample_action(self, state: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Sample an in-bounds action and its log-probability.

        Returns:
            action_scaled: [B, action_dim], inside [action_low, action_high].
            log_prob:      [B, 1], log pi(a|s) of the scaled action.
        """
        mean, std = self.forward(state)
        dist = torch.distributions.Normal(mean, std)  # std broadcasts over the batch dim
        # rsample() is the reparameterized (differentiable) sample: gradients
        # flow back to mean/std; plain sample() would block them.
        z = dist.rsample()                            # [B, action_dim], unbounded

        action_tanh = torch.tanh(z)                   # squash each dim to (-1, 1)
        half_range = 0.5 * (self.action_high - self.action_low)
        action_scaled = (action_tanh + 1) * half_range + self.action_low  # map to bounds

        # Change of variables: log pi(a) = log p(z)
        #                                  - sum log(1 - tanh(z)^2)   (tanh Jacobian)
        #                                  - sum log(half_range)      (rescaling Jacobian)
        log_prob = dist.log_prob(z).sum(dim=-1, keepdim=True)                           # [B, 1]
        log_prob -= torch.log(1 - action_tanh.pow(2) + 1e-6).sum(dim=-1, keepdim=True)  # [B, 1]
        # BUG FIX: the rescaling Jacobian term was previously omitted. It is
        # constant w.r.t. the parameters (policy gradients are unchanged), but
        # without it the returned value is not the log-prob of the executed action.
        log_prob -= torch.log(half_range + 1e-6).sum()

        return action_scaled, log_prob  # ([B, action_dim], [B, 1])

class REINFORCE_Agent:
    """Monte-Carlo policy-gradient (REINFORCE) agent for continuous actions."""

    def __init__(self,
        state_dim: int,
        hidden_dim: int,
        action_dim: int,
        device: torch.device,
        learning_rate: float,
        discount_factor: float,
        action_low: np.ndarray,
        action_high: np.ndarray,
    ):
        self.state_dim = state_dim
        self.hidden_dim = hidden_dim
        self.action_dim = action_dim
        self.device = device
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.action_low = action_low
        self.action_high = action_high

        self.policy_net = PolicyNet(
            state_dim, hidden_dim, action_dim,
            action_low, action_high
        ).to(device)

        self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=learning_rate)

        self.count = 0  # number of policy-network updates performed

    def take_action(self, state: np.ndarray) -> tuple[np.ndarray, torch.Tensor]:
        """Sample an action for `state`; return (action (action_dim,), log_prob [1, 1]).

        REINFORCE needs log pi(a|s) *at sampling time*: it must be stored here
        because the sampled latent cannot be recovered from the action in update().
        """
        state_tensor = torch.tensor(state, dtype=torch.float32, device=self.device).unsqueeze(0)  # [1, state_dim]
        action, log_prob = self.policy_net.sample_action(state_tensor)  # ([1, action_dim], [1, 1])
        return action.detach().cpu().numpy()[0], log_prob

    def update(self, trajectory_record: dict):
        """Perform one policy-gradient step from a complete episode.

        trajectory_record:
            'rewards':   list[float]
            'log_probs': list of [1, 1] tensors returned by take_action
        """
        rewards   = trajectory_record['rewards']
        log_probs = trajectory_record['log_probs']
        if not rewards:
            # Empty trajectory: torch.tensor([]).mean() would produce NaN.
            return

        # Discounted returns G_t, computed backwards in O(T).
        returns = []
        G = 0.0
        for r in reversed(rewards):
            G = r + self.discount_factor * G
            returns.insert(0, G)
        returns = torch.tensor(returns, dtype=torch.float32, device=self.device)  # [T]
        returns = returns - returns.mean()  # mean baseline to reduce variance

        # loss = -mean_t[ log pi(a_t|s_t) * G_t ], vectorized
        # (replaces the previous per-step Python loop; debug print removed).
        log_probs_t = torch.cat(log_probs, dim=0).squeeze(-1)  # [T]
        loss = -(log_probs_t * returns).mean()                 # scalar

        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), max_norm=1.0)
        self.optimizer.step()

        self.count += 1

    def save_model(self, path: str):
        """Persist policy weights, optimizer state and the update counter."""
        torch.save({
            'policy_net_state_dict': self.policy_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'count': self.count
        }, path)
        print(f"Model saved to {path}")

    def load_model(self, path: str) -> bool:
        """Load a checkpoint if it exists; return True on success."""
        if not os.path.exists(path):
            print(f"No saved model found at {path}, starting from scratch.")
            return False
        checkpoint = torch.load(path, map_location=self.device, weights_only=True)
        self.policy_net.load_state_dict(checkpoint['policy_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.count = checkpoint.get('count', self.count)
        # Fixed message: REINFORCE has no target network (the old text was a
        # copy-paste leftover from a value-based agent).
        print(f"Model loaded from {path}.")
        return True

def train_REINFORCE(
    env: gym.Env,
    agent: REINFORCE_Agent,
    num_episodes: int,
    plot_path: "str | None" = None,
):
    """Train `agent` on `env` for `num_episodes` episodes.

    Args:
        env: gymnasium environment with a continuous observation/action space.
        agent: REINFORCE agent; updated once per finished episode.
        num_episodes: number of episodes to run.
        plot_path: optional file path; when given, a progress plot of the
            returns is saved there every 10 episodes.
            BUG FIX: this used to be read from a module-level global defined
            only under __main__, raising NameError when the function was
            imported and called from elsewhere.

    Returns:
        List of (unscaled) episode returns.
    """
    return_list = []  # per-episode returns

    for _ in tqdm(range(num_episodes), desc="Training Episodes"):
        episode_return = 0.0
        trajectory_record = {
            'rewards'    : [],
            'log_probs'  : [],
        }

        state, _ = env.reset()  # np.ndarray (state_dim,)
        done = False

        while not done:
            action, log_prob = agent.take_action(state)  # (action_dim,), [1, 1]

            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated

            # Scale rewards for the learning signal only; report unscaled returns.
            scaled_reward = float(reward) * REWARD_SCALE
            trajectory_record['rewards'  ].append(scaled_reward)
            trajectory_record['log_probs'].append(log_prob)

            episode_return += reward

            state = next_state

        return_list.append(episode_return)
        agent.update(trajectory_record)  # one gradient step per episode

        # Periodic progress snapshot; close the figure instead of show()ing it
        # so training is neither blocked nor leaking figures.
        if plot_path is not None and len(return_list) % 10 == 0:
            plt.figure(figsize=(10, 5))
            plt.plot(return_list)
            plt.title("REINFORCE Training Returns")
            plt.xlabel("Episode")
            plt.ylabel("Return")
            plt.grid()
            plt.savefig(plot_path)
            plt.close()

    return return_list

if __name__ == "__main__":
    # All artifacts go under results/{GAME_NAME}/
    results_dir = os.path.join("results", GAME_NAME)
    os.makedirs(results_dir, exist_ok=True)
    model_path = os.path.join(results_dir, "reinforce_model.pth")
    plot_path = os.path.join(results_dir, "training_returns.png")

    # Environment: continuous observation and action spaces.
    env = gym.make(GAME_NAME, render_mode=None, max_episode_steps=GAME_UPPER_STEP)
    obs_space, act_space = env.observation_space, env.action_space

    # Agent, dimensioned from the environment's spaces.
    agent = REINFORCE_Agent(
        state_dim=obs_space.shape[0],
        hidden_dim=HIDDEN_DIM,
        action_dim=act_space.shape[0],
        device=DEVICE,
        learning_rate=LEARNING_RATE,
        discount_factor=DISCOUNT_FACTOR,
        action_low=act_space.low,
        action_high=act_space.high,
    )
    agent.load_model(model_path)  # resume from checkpoint when one exists

    return_list = train_REINFORCE(env=env, agent=agent, num_episodes=NUM_EPISODES)

    agent.save_model(model_path)
    env.close()

    # Final training curve.
    plt.figure(figsize=(10, 5))
    plt.plot(return_list)
    plt.title("REINFORCE Training Returns")
    plt.xlabel("Episode")
    plt.ylabel("Return")
    plt.grid()
    plt.savefig(plot_path)
    plt.show()
