from pathlib import Path
import pybullet_envs_gymnasium
from stable_baselines3.common.vec_env import VecNormalize
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
import gymnasium as gym
import os

if __name__ == "__main__":
    env_id = "HalfCheetahBulletEnv-v0"
    num_cpu = 4  # number of parallel worker processes

    # Create a vectorized training environment.
    # NOTE(review): rendering 4 subprocess envs in "human" mode during training
    # is very slow; drop render_mode here if training throughput matters.
    vec_env = make_vec_env(
        env_id,
        n_envs=num_cpu,
        env_kwargs={"render_mode": "human"},
        vec_env_cls=SubprocVecEnv,
    )

    # Automatically normalize observations and rewards with running statistics.
    vec_env = VecNormalize(vec_env, norm_obs=True, norm_reward=True, clip_obs=10.0)

    model = PPO("MlpPolicy", vec_env)
    model.learn(total_timesteps=2000)

    # Close the training environments (and their render windows).
    vec_env.close()

    # Save artifacts under ./sb3_models in the current working directory.
    save_dir = Path.cwd() / "sb3_models"
    save_dir.mkdir(parents=True, exist_ok=True)

    model_save_path = save_dir / "ppo_halfcheetah"
    stats_save_path = save_dir / "vec_normalize.pkl"

    try:
        # Save the trained policy.
        model.save(model_save_path)
        # Save the VecNormalize running statistics: without them a reloaded
        # policy would see differently-scaled observations.
        vec_env.save(stats_save_path)
        print(f"Model saved to {model_save_path}")
        print(f"Stats saved to {stats_save_path}")
    except Exception as e:
        # Top-level script boundary: report and continue to evaluation.
        print(f"Error saving files: {e}")

    # Drop the training-time model and env objects before reloading.
    del model, vec_env

    # Re-create a single-env stack and load the saved normalization stats.
    vec_env = make_vec_env(env_id, n_envs=1)
    vec_env = VecNormalize.load(stats_save_path, vec_env)
    vec_env.training = False    # do not update running stats at test time
    vec_env.norm_reward = False  # raw rewards are wanted at test time

    # Reload the trained policy.
    model = PPO.load(model_save_path, env=vec_env)

    # Fresh human-rendered environment for a qualitative rollout.
    test_env = gym.make(env_id, render_mode="human")
    obs, _ = test_env.reset()
    for _ in range(1000):
        # BUG FIX: the policy was trained on normalized observations, so raw
        # observations from gym.make must be scaled with the loaded
        # VecNormalize statistics before prediction. Use the deterministic
        # action for evaluation.
        action, _states = model.predict(vec_env.normalize_obs(obs), deterministic=True)
        obs, reward, terminated, truncated, _ = test_env.step(action)
        if terminated or truncated:
            obs, _ = test_env.reset()
    test_env.close()
    vec_env.close()  # release the eval VecNormalize stack as well