import gym
import imageio
import matplotlib.pyplot as plt
from stable_baselines3 import PPO
from stable_baselines3.ppo.policies import MlpPolicy
from stable_baselines3.common.env_util import make_vec_env

# Create a vectorized environment: 4 parallel CartPole instances for faster
# rollout collection during training.
env = make_vec_env('CartPole-v1', n_envs=4)


# Custom network architecture for the policy.
class CustomMlpPolicy(MlpPolicy):
    """MlpPolicy with separate 128x128 hidden layers for the policy (pi)
    and value-function (vf) networks.

    Fix: the original passed ``net_arch=[dict(pi=..., vf=...)]`` (a dict
    wrapped in a list). That format was deprecated in SB3 1.8 and removed
    in SB3 2.x (which this script targets, given its new-style Gym API);
    the dict must be passed directly.
    """

    def __init__(self, *args, **kwargs):
        # Forward everything to MlpPolicy, overriding only the architecture.
        super().__init__(*args, net_arch=dict(pi=[128, 128], vf=[128, 128]), **kwargs)


# Tuned PPO hyperparameters for CartPole.
model = PPO(CustomMlpPolicy, env, verbose=1, device='cpu',
            learning_rate=0.0003,  # optimizer step size
            gamma=0.99,            # discount factor
            n_steps=2048,          # rollout length per env before each update
            batch_size=64,         # minibatch size for gradient updates
            n_epochs=10)           # optimization epochs per rollout

# Total reward of each evaluation episode (one entry per training round).
episode_rewards = []

# Train in 5 rounds, checkpointing and evaluating after each round.
total_timesteps = 100000
for _ in range(5):
    model.learn(total_timesteps=total_timesteps)

    # Save a checkpoint after each round (overwrites the previous file).
    model.save("ppo_cartpole_mlp")

    # Evaluate one episode and record its total reward.
    # NOTE: only the first of the 4 vectorized sub-environments is tracked.
    obs = env.reset()
    episode_reward = 0
    while True:
        # deterministic=True evaluates the greedy policy instead of sampling,
        # giving a less noisy reward estimate (the original sampled actions).
        action, _states = model.predict(obs, deterministic=True)
        obs, rewards, dones, info = env.step(action)
        episode_reward += rewards[0]
        if dones[0]:
            episode_rewards.append(episode_reward)
            break

# Reload the saved checkpoint to verify that save/load round-trips correctly.
loaded_model = PPO.load("ppo_cartpole_mlp", device='cpu')

# Build a plain (non-vectorized) environment for testing and video capture.
test_env = gym.make('CartPole-v1', render_mode="rgb_array")

# Rendered RGB frames collected during the rollout.
frames = []

# Roll the loaded policy out for 1000 steps, resetting whenever an episode ends.
obs = test_env.reset()[0]
for _step in range(1000):
    # predict() is fed a batch of one observation, so unwrap the single
    # returned action before stepping the unbatched environment.
    batched_action, _ = loaded_model.predict([obs])
    obs, _reward, terminated, truncated, _info = test_env.step(batched_action[0])
    frames.append(test_env.render())  # capture the current frame
    if terminated or truncated:
        obs = test_env.reset()[0]

# Release the test environment's resources.
test_env.close()

# Write the collected frames out as an MP4 video via imageio.
video_writer = imageio.get_writer('cartpole_video.mp4', fps=60)
try:
    for rgb_frame in frames:
        video_writer.append_data(rgb_frame)
finally:
    # Explicit close mirrors what the context manager would do.
    video_writer.close()

# Plot the per-round evaluation rewards collected during training,
# using matplotlib's object-oriented API.
fig, ax = plt.subplots()
ax.plot(episode_rewards)
ax.set_xlabel('Episode')
ax.set_ylabel('Total Reward')
ax.set_title('Reward Curve during Training')
plt.show()
