import gym
from stable_baselines3 import DDPG
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.vec_env import DummyVecEnv
import numpy as np
import matplotlib.pyplot as plt
import torch

# Build several parallel MountainCarContinuous-v0 instances so experience
# collection during training can be vectorized.
num_envs = 4


def _make_env():
    # Factory passed to DummyVecEnv; each call produces a fresh environment.
    return gym.make('MountainCarContinuous-v0')


env = DummyVecEnv([_make_env for _ in range(num_envs)])

# Dimensionality of the continuous action space.
n_actions = env.action_space.shape[-1]

# Gaussian exploration noise added to the deterministic policy's actions
# (zero mean, 0.1 standard deviation per action dimension).
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))

# Pick the training device up front: prefer the GPU when one is available.
train_device = "cuda" if torch.cuda.is_available() else "cpu"

# DDPG agent with an MLP policy.  Hyperparameters tuned to speed up
# training: a larger batch size, a slightly higher learning rate, and a
# 100k-transition replay buffer.
model = DDPG(
    "MlpPolicy",
    env,
    action_noise=action_noise,
    verbose=1,
    device=train_device,
    batch_size=256,
    learning_rate=0.001,
    buffer_size=100000,
)

# Run the training loop.
num_timesteps = 100000
model.learn(total_timesteps=num_timesteps)

# Release the parallel training environments.
env.close()

# Create a single (non-vectorized) environment for evaluation.
test_env = gym.make('MountainCarContinuous-v0')

# Evaluate the trained policy and record the total reward of each episode.
num_episodes = 200
episode_rewards = []
for episode in range(num_episodes):
    # reset() returns (observation, info) under the Gym >= 0.26 API.
    obs = test_env.reset()[0]
    done = False
    total_reward = 0
    while not done:
        # Use the deterministic policy for evaluation.  SB3's predict()
        # defaults to deterministic=False, which samples exploratory
        # actions and understates the learned policy's performance.
        action, _states = model.predict(obs, deterministic=True)
        # step() returns a 5-tuple; an episode ends on either a natural
        # termination or a time-limit truncation.
        obs, reward, terminated, truncated, _ = test_env.step(action)
        done = terminated or truncated
        total_reward += reward
    episode_rewards.append(total_reward)
    print(f"Episode {episode + 1}: Total Reward = {total_reward}")

test_env.close()

# Plot the total reward obtained in each evaluation episode.
# NOTE: these are post-training evaluation episodes, so the title says
# "Evaluation" rather than "Training" (the original label was misleading).
plt.plot(range(1, num_episodes + 1), episode_rewards)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
plt.title('Evaluation Performance of DDPG on MountainCarContinuous-v0 (Stable-Baselines3)')
plt.show()
