import os

import gym
import numpy as np
import matplotlib.pyplot as plt
# BUG FIX: stable_baselines3 has no REINFORCE algorithm. The hyperparameters
# below (buffer_size, exploration_initial_eps/exploration_final_eps) and the
# "dqn_cartpole_tensorboard" log directory all indicate DQN was intended.
from stable_baselines3 import DQN
from stable_baselines3.common.monitor import Monitor

# Create the CartPole environment.
env = gym.make('CartPole-v1')

# Create the DQN model.
model = DQN('MlpPolicy', env, learning_rate=0.0005, buffer_size=50000,
            exploration_initial_eps=1.0, exploration_final_eps=0.01,
            gamma=0.99, verbose=1, tensorboard_log="./dqn_cartpole_tensorboard/")

# Train the model.
model.learn(total_timesteps=50000)

# Evaluate the trained policy over a fixed number of test episodes.
test_episodes = 10
total_rewards = []
for episode in range(test_episodes):
    obs = env.reset()
    done = False
    episode_reward = 0
    while not done:
        # deterministic=True: always take the greedy (highest-value) action.
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        episode_reward += reward
    total_rewards.append(episode_reward)
    print(f"测试回合 {episode + 1}，累计奖励：{episode_reward}")

print(f"平均测试奖励：{np.mean(total_rewards)}")

# Training reward curve: wrap a fresh env in the built-in Monitor wrapper so
# per-episode statistics are written to a monitor CSV for the plotting below.
log_dir = "./dqn_cartpole_tensorboard/"
os.makedirs(log_dir, exist_ok=True)
# BUG FIX: Monitor writes no log file unless a filename is supplied, so the
# monitor-file lookup in the plotting section would otherwise find nothing.
env = Monitor(gym.make('CartPole-v1'),
              filename=os.path.join(log_dir, "monitor.csv"))
model = DQN('MlpPolicy', env, learning_rate=0.0005, verbose=1,
            tensorboard_log=log_dir)
model.learn(total_timesteps=50000)

# Plot the per-episode training reward curve from the Monitor CSV logs
# (training logs live in the tensorboard_log folder).
import pandas as pd
import os

log_dir = "./dqn_cartpole_tensorboard/"
# BUG FIX: the original took monitor_files[0] from an unsorted os.listdir(),
# which is an arbitrary file, not "the latest" as the comment claimed.
# Sort by modification time so the most recent monitor file comes last.
monitor_files = sorted(
    (f for f in os.listdir(log_dir) if f.startswith('monitor')),
    key=lambda f: os.path.getmtime(os.path.join(log_dir, f)),
)
if not monitor_files:
    # Fail with a clear message instead of an opaque IndexError.
    raise FileNotFoundError(
        f"No monitor files found in {log_dir}; "
        "wrap the training env in Monitor with a filename first."
    )

# Load the most recent monitor file; skiprows=1 skips the JSON header line
# that Monitor writes before the CSV column names.
monitor_data = pd.read_csv(os.path.join(log_dir, monitor_files[-1]), skiprows=1)
plt.figure(figsize=(10, 6))
# Column 'r' is the episode reward recorded by the Monitor wrapper.
plt.plot(monitor_data['r'], label='Episode Reward')
plt.xlabel('Episode')
plt.ylabel('Reward')
plt.title('Reward per Episode during Training')
plt.legend()
plt.grid()
plt.show()