import gymnasium as gym

from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy

from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import VecNormalize, DummyVecEnv, SubprocVecEnv
from stable_baselines3 import PPO

# Create environment
# env = gym.make("LunarLander-v3", render_mode="rgb_array")           # no wrapper

# Create the vectorized training environment with monitoring.
# NOTE: make_vec_env already wraps every sub-env in a Monitor when
# monitor_dir is given, so passing wrapper_class=Monitor as well would
# double-wrap each env — it has been removed.
env = make_vec_env(
    "LunarLander-v3",
    n_envs=8,
    monitor_dir="./logs",
    env_kwargs={"render_mode": "rgb_array"},
    vec_env_cls=DummyVecEnv,
)
# Normalize observations and rewards (running stats are updated during training).
env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.0)

# Instantiate the agent
model = PPO(
    "MlpPolicy",
    env,
    learning_rate=1e-4,
    n_steps=2048,
    batch_size=64,
    gamma=0.99,
    ent_coef=0.01,
    verbose=1,
    device="cpu",
)
# Train the agent and display a progress bar
model.learn(total_timesteps=500_000, progress_bar=True)
# Save the agent (file name kept so the loading code below keeps working)
model.save("dqn_lunar")
# Save the VecNormalize statistics so that evaluation can reuse them
# instead of re-estimating the running mean/std from scratch.
env.save("vec_normalize.pkl")
env.close()
del model, env  # delete trained model to demonstrate loading

# Load the trained agent
# NOTE: if you have loading issues, you can pass `print_system_info=True`
# to compare the system on which the model was trained vs the current one
# model = DQN.load("dqn_lunar", env=env, print_system_info=True)

# Re-create a single evaluation env. render_mode="rgb_array" is required
# so that vec_env.render("human") can display frames later on.
vec_env = make_vec_env(
    "LunarLander-v3",
    n_envs=1,
    env_kwargs={"render_mode": "rgb_array"},
)
# Load the saved normalization statistics into the eval env.
vec_env = VecNormalize.load("vec_normalize.pkl", vec_env)
# Do not update the running statistics at test time
vec_env.training = False
# Reward normalization is not needed at test time
vec_env.norm_reward = False

model = PPO.load("dqn_lunar", env=vec_env)

# Evaluate the agent
# NOTE: If you use wrappers with your environment that modify rewards,
#       this will be reflected here. To evaluate with original rewards,
#       wrap environment in a "Monitor" wrapper before other wrappers.
mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)
# Report the evaluation result instead of silently discarding it.
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")

# Enjoy trained agent
vec_env = model.get_env()
obs = vec_env.reset()
for _ in range(10000):
    action, _states = model.predict(obs, deterministic=True)
    # VecEnv auto-resets each sub-env when its episode ends.
    obs, rewards, dones, info = vec_env.step(action)
    vec_env.render("human")