import os
from datetime import datetime

import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.normal import Normal
from torch.utils.tensorboard import SummaryWriter

# Select the compute device (GPU if available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Training hyperparameters
max_episodes = 5000
max_steps = 1000
update_interval = 2000  # intended: update after this many collected steps; NOTE(review): never referenced below — updates actually happen once per episode, confirm which is intended
gamma = 0.99
lambd = 0.95  # GAE lambda
epsilon = 0.2  # PPO clipping parameter
actor_lr = 5e-5
critic_lr = 1e-4
epochs = 10  # optimization epochs per PPO update
batch_size = 128

# Environment setup
env = gym.make("Pendulum-v1", max_episode_steps=max_steps)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high[0]  # Pendulum's action range is [-2, 2]

# Seed all RNG sources for reproducibility (torch, numpy, CUDA, env).
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
env.reset(seed=seed)

# Actor (policy) network: diagonal Gaussian over continuous actions.
class Actor(nn.Module):
    """Gaussian policy.

    Produces the mean (tanh-squashed into [-1, 1]) and the standard
    deviation of a Normal distribution per action dimension. Callers are
    expected to rescale sampled actions to the environment's bounds.
    """

    def __init__(self, state_dim, action_dim, hidden_dims=(128, 128)):
        """
        Args:
            state_dim: size of the observation vector.
            action_dim: number of action dimensions.
            hidden_dims: widths of the hidden layers. A tuple default is
                used instead of a list to avoid the mutable-default-argument
                pitfall.
        """
        super(Actor, self).__init__()

        # Shared feature extractor: Linear -> ReLU -> LayerNorm per layer.
        layers = []
        input_dim = state_dim
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(input_dim, hidden_dim))
            layers.append(nn.ReLU())
            layers.append(nn.LayerNorm(hidden_dim))
            input_dim = hidden_dim
        self.feature_extractor = nn.Sequential(*layers)

        # Separate heads for the mean and the log standard deviation.
        self.mu_head = nn.Linear(hidden_dims[-1], action_dim)
        self.log_std_head = nn.Linear(hidden_dims[-1], action_dim)

        # Orthogonal initialization: sqrt(2) gain for hidden layers, small
        # gain on the output heads so initial actions stay near zero.
        for layer in self.feature_extractor:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight, gain=np.sqrt(2))
                nn.init.constant_(layer.bias, 0)

        nn.init.orthogonal_(self.mu_head.weight, gain=0.01)
        nn.init.constant_(self.mu_head.bias, 0)

        nn.init.orthogonal_(self.log_std_head.weight, gain=0.01)
        nn.init.constant_(self.log_std_head.bias, 0)

    def forward(self, x):
        """Return (mu, std) of the action distribution for states ``x``."""
        x = self.feature_extractor(x)
        mu = torch.tanh(self.mu_head(x))  # mean constrained to [-1, 1]
        log_std = self.log_std_head(x)
        # Clamp log-std to keep the std in a numerically safe range.
        log_std = torch.clamp(log_std, -20, 2)
        std = torch.exp(log_std)
        return mu, std

    def get_action(self, state, deterministic=False):
        """Sample an action for ``state``.

        Returns:
            If ``deterministic`` is True: the mean action tensor only.
            Otherwise: ``(action, log_prob, entropy)`` with log_prob and
            entropy summed over action dimensions.

        Note: the stochastic sample is NOT squashed, so it may fall outside
        [-1, 1] even though the mean is bounded by tanh.
        """
        mu, std = self.forward(state)
        if deterministic:
            return mu
        dist = Normal(mu, std)
        action = dist.sample()
        return action, dist.log_prob(action).sum(dim=-1), dist.entropy().sum(dim=-1)

    def evaluate_actions(self, states, actions):
        """Return (log_prob, entropy) of ``actions`` under the current policy."""
        mu, std = self.forward(states)
        dist = Normal(mu, std)
        return dist.log_prob(actions).sum(dim=-1), dist.entropy().sum(dim=-1)


# Critic (state-value) network.
class Critic(nn.Module):
    """State-value function V(s) approximator."""

    def __init__(self, state_dim, hidden_dims=(128, 128)):
        """
        Args:
            state_dim: size of the observation vector.
            hidden_dims: widths of the hidden layers. A tuple default is
                used instead of a list to avoid the mutable-default-argument
                pitfall.
        """
        super(Critic, self).__init__()

        # Linear -> ReLU -> LayerNorm stack followed by a scalar output head.
        layers = []
        input_dim = state_dim
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(input_dim, hidden_dim))
            layers.append(nn.ReLU())
            layers.append(nn.LayerNorm(hidden_dim))
            input_dim = hidden_dim
        layers.append(nn.Linear(hidden_dims[-1], 1))

        self.model = nn.Sequential(*layers)

        # Orthogonal initialization with sqrt(2) gain for all linear layers.
        for layer in self.model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight, gain=np.sqrt(2))
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """Return V(s); the trailing singleton dimension is squeezed away."""
        return self.model(x).squeeze(-1)


# PPO algorithm: clipped surrogate objective with separate actor/critic optimizers.
class PPO:
    """Proximal Policy Optimization for continuous action spaces.

    Holds the actor/critic networks and their optimizers, computes
    GAE(lambda) returns, and performs minibatch clipped-PPO updates.
    """

    def __init__(
        self,
        state_dim,
        action_dim,
        action_bound,
        gamma=0.99,
        lambd=0.95,
        epsilon=0.2,
        actor_lr=3e-4,
        critic_lr=1e-3,
        epochs=10,
        batch_size=64,
        device=None,
        entropy_coef=0.005,
    ):
        """
        Args:
            state_dim: size of the observation vector.
            action_dim: number of action dimensions.
            action_bound: scale factor mapping policy output to env actions.
            gamma: discount factor.
            lambd: GAE lambda.
            epsilon: PPO clipping range.
            actor_lr / critic_lr: learning rates.
            epochs: optimization epochs per update call.
            batch_size: minibatch size within each epoch.
            device: torch device; defaults to CUDA when available
                (resolved lazily instead of binding a module global).
            entropy_coef: weight of the entropy bonus (was hard-coded 0.005).
        """
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device
        self.actor = Actor(state_dim, action_dim).to(device)
        self.critic = Critic(state_dim).to(device)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr)

        self.gamma = gamma
        self.lambd = lambd
        self.epsilon = epsilon
        self.epochs = epochs
        self.batch_size = batch_size
        self.action_bound = action_bound
        self.entropy_coef = entropy_coef

        # TensorBoard writer, one run directory per training session.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.writer = SummaryWriter(f"runs/PPO_Pendulum_{timestamp}")

    def select_action(self, state):
        """Sample an action for a single state.

        Returns:
            (scaled_action ndarray, log_prob float) — the action is scaled
            by ``action_bound``; the log-prob is of the UNscaled sample.
        """
        state = torch.FloatTensor(state).to(self.device)
        with torch.no_grad():
            action, log_prob, _ = self.actor.get_action(state)
        # Rescale the policy output to the environment's action range here.
        scaled_action = action * self.action_bound
        return scaled_action.cpu().numpy(), log_prob.cpu().item()

    def compute_gae(self, rewards, values, next_value, dones):
        """Generalized Advantage Estimation: return the list of lambda-returns.

        Args:
            rewards, values, dones: per-step trajectory lists.
            next_value: bootstrap value V(s_T) for the state after the
                last step (0 if the episode terminated).
        """
        returns = []
        gae = 0
        for step in reversed(range(len(rewards))):
            # Bootstrap from next_value at the final step, otherwise from
            # the stored value of the following state.
            nv = next_value if step == len(rewards) - 1 else values[step + 1]
            mask = 1 - dones[step]  # zero the bootstrap across episode ends
            delta = rewards[step] + self.gamma * nv * mask - values[step]
            gae = delta + self.gamma * self.lambd * mask * gae
            returns.insert(0, gae + values[step])

        return returns

    def update(self, states, actions, log_probs, returns, advantages):
        """Run ``epochs`` of minibatch clipped-PPO updates on one rollout.

        Returns:
            (actor_loss, critic_loss, entropy_loss) from the LAST minibatch.
        """
        # Convert rollout lists to tensors. Going through a single ndarray
        # first avoids torch's slow (and warning-emitting) per-element
        # conversion of a list of ndarrays.
        states = torch.as_tensor(np.asarray(states), dtype=torch.float32).to(self.device)
        actions = torch.as_tensor(np.asarray(actions), dtype=torch.float32).to(self.device)
        old_log_probs = torch.as_tensor(np.asarray(log_probs), dtype=torch.float32).to(self.device)
        returns = torch.as_tensor(np.asarray(returns), dtype=torch.float32).to(self.device)
        advantages = torch.as_tensor(np.asarray(advantages), dtype=torch.float32).to(self.device)

        # Normalize advantages for stable gradients.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # Multiple optimization epochs over shuffled minibatches.
        for _ in range(self.epochs):
            dataset_size = len(states)
            indices = np.arange(dataset_size)
            np.random.shuffle(indices)

            for start_idx in range(0, dataset_size, self.batch_size):
                idx = indices[start_idx : start_idx + self.batch_size]

                batch_states = states[idx]
                batch_actions = actions[idx]
                batch_old_log_probs = old_log_probs[idx]
                batch_returns = returns[idx]
                batch_advantages = advantages[idx]

                # Re-evaluate the actions under the current policy.
                new_log_probs, entropy = self.actor.evaluate_actions(
                    batch_states, batch_actions
                )

                # Probability ratio and the two surrogate objectives.
                ratio = torch.exp(new_log_probs - batch_old_log_probs)
                surr1 = ratio * batch_advantages
                surr2 = (
                    torch.clamp(ratio, 1.0 - self.epsilon, 1.0 + self.epsilon)
                    * batch_advantages
                )

                # Policy loss (negated: we maximize the clipped objective).
                actor_loss = -torch.min(surr1, surr2).mean()

                # Entropy bonus encourages exploration.
                entropy_loss = -self.entropy_coef * entropy.mean()

                total_actor_loss = actor_loss + entropy_loss

                # Actor step with gradient clipping.
                self.actor_optimizer.zero_grad()
                total_actor_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), max_norm=0.5)
                self.actor_optimizer.step()

                # Critic step: MSE against the lambda-returns.
                value_preds = self.critic(batch_states)
                critic_loss = nn.MSELoss()(value_preds, batch_returns)

                self.critic_optimizer.zero_grad()
                critic_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), max_norm=0.5)
                self.critic_optimizer.step()

        return actor_loss.item(), critic_loss.item(), entropy_loss.item()


# Instantiate the PPO agent with the hyperparameters defined above.
ppo = PPO(
    state_dim,
    action_dim,
    action_bound=action_bound,
    gamma=gamma,
    lambd=lambd,
    epsilon=epsilon,
    actor_lr=actor_lr,
    critic_lr=critic_lr,
    epochs=epochs,
    batch_size=batch_size,
)

# Training loop: collect one full episode per iteration, then run a PPO update.
total_steps = 0
for episode in range(max_episodes):
    state = env.reset()[0]
    episode_reward = 0

    # Per-episode trajectory buffers.
    states = []
    actions = []
    rewards = []
    log_probs = []
    dones = []
    values = []

    for step in range(max_steps):
        total_steps += 1

        # Evaluate V(s) for the current state (consumed later by GAE).
        state_tensor = torch.FloatTensor(state).to(device)
        with torch.no_grad():
            value = ppo.critic(state_tensor).item()

        # Sample an action from the current policy and step the environment.
        action, log_prob = ppo.select_action(state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        # NOTE(review): time-limit truncation is folded into `done`, so the
        # GAE bootstrap below is zeroed on truncation as well — confirm this
        # is intended rather than bootstrapping V(s') on time-limit cutoffs.
        done = terminated or truncated

        # Store the transition.
        states.append(state)
        actions.append(action)
        rewards.append(reward)
        log_probs.append(log_prob)
        dones.append(done)
        values.append(value)

        episode_reward += reward
        state = next_state

        if done:
            break

    # Bootstrap value of the state after the last step (for GAE);
    # zero when the episode ended.
    if done:
        next_value = 0
    else:
        next_state_tensor = torch.FloatTensor(next_state).to(device)
        with torch.no_grad():
            next_value = ppo.critic(next_state_tensor).item()

    # Compute GAE lambda-returns and advantages.
    returns = ppo.compute_gae(rewards, values, next_value, dones)
    advantages = [ret - val for ret, val in zip(returns, values)]

    # Update the policy and value networks on this episode's rollout.
    actor_loss, critic_loss, entropy_loss = ppo.update(
        states, actions, log_probs, returns, advantages
    )

    # Log losses and episode reward to TensorBoard.
    ppo.writer.add_scalar("Loss/Actor", actor_loss, episode)
    ppo.writer.add_scalar("Loss/Critic", critic_loss, episode)
    ppo.writer.add_scalar("Loss/Entropy", entropy_loss, episode)
    ppo.writer.add_scalar("Reward/Episode", episode_reward, episode)

    # Print progress every 10 episodes.
    if episode % 10 == 0:
        print(
            f"Episode {episode}, Reward: {episode_reward:.2f}, "
            f"Actor Loss: {actor_loss:.4f}, Critic Loss: {critic_loss:.4f}"
        )

# Save the trained model weights.
model_dir = "saved_models"
os.makedirs(model_dir, exist_ok=True)
torch.save(ppo.actor.state_dict(), os.path.join(model_dir, "ppo_actor_pendulum.pth"))
torch.save(ppo.critic.state_dict(), os.path.join(model_dir, "ppo_critic_pendulum.pth"))
print(f"Models saved to {model_dir}")

# Close the TensorBoard writer and the environment.
ppo.writer.close()
env.close()
