import os
from datetime import datetime

import gymnasium as gym
import numpy as np
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

# Import the policy network model
from models import PolicyNetwork

# Select the compute device (GPU when available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Training hyperparameters
max_episodes = 5000  # total training episodes
max_steps = 1000  # hard cap on steps per episode (also passed to the env wrapper)
gamma = 0.99  # discount factor for returns
lr = 1e-3  # Adam learning rate

# Create the environment
env = gym.make("Pendulum-v1", max_episode_steps=max_steps)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high[0]  # Pendulum's action range is [-2, 2]

# Seed all RNGs for reproducibility (torch, numpy, CUDA, and the env itself)
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
env.reset(seed=seed)

# VPG (REINFORCE) algorithm
class VPG:
    """Vanilla Policy Gradient (REINFORCE) agent for a continuous action space.

    Collects one full episode, computes normalized discounted returns, and
    performs a single gradient step on the log-probability-weighted loss.
    """

    def __init__(
        self, state_dim, action_dim, action_bound, gamma=0.99, lr=1e-3, device=None
    ):
        """Build the policy network, optimizer, and TensorBoard writer.

        Args:
            state_dim: dimensionality of the observation vector.
            action_dim: dimensionality of the action vector.
            action_bound: scale applied to the raw policy action
                (Pendulum actions lie in [-2, 2]).
            gamma: discount factor for returns.
            lr: Adam learning rate.
            device: torch device; defaults to CUDA when available, else CPU.
        """
        # Resolve the default here instead of relying on a module-level global;
        # the resolved value is identical, so caller behavior is unchanged,
        # but the class is now usable on its own.
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device
        # PolicyNetwork no longer takes action_bound; scaling happens in
        # select_action instead.
        self.policy = PolicyNetwork(state_dim, action_dim).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
        self.gamma = gamma
        self.action_bound = action_bound  # action scale stored on the agent

        # TensorBoard writer with a unique, timestamped run directory.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.writer = SummaryWriter(f"runs/VPG_Pendulum_{timestamp}")

    def select_action(self, state):
        """Sample an action for `state`; return (scaled_action, log_prob)."""
        state = torch.FloatTensor(state).to(self.device)
        action, log_prob = self.policy.get_action(state)
        # Scale the raw policy output to the environment's action range.
        scaled_action = action * self.action_bound
        # NOTE(review): .item() assumes a single-dimensional action — fine for
        # Pendulum (action_dim == 1); confirm before reusing on other envs.
        # log_prob is moved to CPU for cheap per-step storage; train() moves
        # it back to self.device before the update.
        return scaled_action.item(), log_prob.cpu()

    def train(self, rewards, log_probs):
        """Run one REINFORCE update over a completed episode.

        Args:
            rewards: list of per-step scalar rewards.
            log_probs: list of per-step log-probability tensors (on CPU, as
                produced by select_action; must carry gradient history).

        Returns:
            The scalar policy-loss value.
        """
        # Discounted returns-to-go, computed backwards over the episode.
        discounted_rewards = []
        R = 0.0
        for r in reversed(rewards):
            R = r + self.gamma * R
            discounted_rewards.insert(0, R)

        discounted_rewards = torch.FloatTensor(discounted_rewards).to(self.device)
        # Normalize returns to reduce gradient variance. BUG FIX: guard the
        # single-step episode — std() of a one-element tensor is NaN and would
        # poison the entire update.
        if discounted_rewards.numel() > 1:
            discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (
                discounted_rewards.std() + 1e-9
            )

        # Policy-gradient loss: log-prob weighted by the return, negated
        # because the optimizer minimizes. BUG FIX: log_probs are stored on
        # CPU (see select_action) while the returns live on self.device; the
        # original CPU-tensor x CUDA-tensor product raised a RuntimeError on
        # GPU, so move each log_prob to the agent's device first.
        policy_loss = torch.stack(
            [-lp.to(self.device) * R for lp, R in zip(log_probs, discounted_rewards)]
        ).sum()

        # Single gradient step on the policy network.
        self.optimizer.zero_grad()
        policy_loss.backward()
        self.optimizer.step()

        return policy_loss.item()


# Instantiate the VPG agent
vpg = VPG(state_dim, action_dim, action_bound=action_bound, gamma=gamma, lr=lr)

# Training loop: one Monte-Carlo policy update per complete episode
for episode in range(max_episodes):
    state = env.reset()[0]  # gymnasium reset() returns (obs, info)
    rewards = []
    log_probs = []

    for step in range(max_steps):
        action, log_prob = vpg.select_action(state)
        # NOTE(review): gymnasium step() returns (obs, reward, terminated,
        # truncated, info); only `terminated` is honored here. Truncation is
        # effectively covered by the max_steps bound on this loop — confirm.
        next_state, reward, done, _, _ = env.step([action])

        rewards.append(reward)
        log_probs.append(log_prob)
        state = next_state

        if done:
            break

    # Episode return and one policy-gradient update over the collected rollout
    total_reward = sum(rewards)
    policy_loss = vpg.train(rewards, log_probs)

    # Log metrics to TensorBoard
    vpg.writer.add_scalar("Loss/Policy_Loss", policy_loss, episode)
    vpg.writer.add_scalar("Reward/Episode_Reward", total_reward, episode)

    # Periodic console progress report
    if episode % 10 == 0:
        print(
            f"Episode {episode}, Total Reward: {total_reward:.2f}, Policy Loss: {policy_loss:.2f}"
        )

# Persist the trained policy weights
model_dir = "saved_models"
os.makedirs(model_dir, exist_ok=True)
torch.save(vpg.policy.state_dict(), os.path.join(model_dir, "vpg_pendulum.pth"))
print(f"Model saved to {model_dir}/vpg_pendulum.pth")

# Flush/close the TensorBoard writer and release the environment
vpg.writer.close()
env.close()
