﻿import gymnasium as gym  # 强化学习环境库
from stable_baselines3 import DQN  # 深度Q网络算法
from stable_baselines3.common.evaluation import evaluate_policy  # 用于评估模型性能
from stable_baselines3.common.callbacks import BaseCallback  # 自定义回调需要继承 BaseCallback
import keyboard  # 用于检测按键事件（需要安装：pip install keyboard)
import time


# Custom callback: logs the total reward of every finished episode, saves a
# checkpoint every 20 episodes, and lets the user pause/resume training with
# the space bar (requires: pip install keyboard).
class CustomCallback(BaseCallback):
    def __init__(self, save_path, verbose=0):
        """
        :param save_path: directory where periodic checkpoints are written
        :param verbose: verbosity level forwarded to BaseCallback
        """
        super().__init__(verbose)
        self.episode_rewards = []  # total reward of each completed episode
        self.epoch_counter = 0  # number of completed episodes ("epochs")
        self.save_path = save_path  # checkpoint directory
        self.paused = False  # pause state flag
        self.current_episode_reward = 0.0  # running sum for the in-progress episode

    def _on_step(self) -> bool:
        # Called once per environment step by Stable-Baselines3.
        #
        # BUG FIX: the previous version computed the "episode reward" as
        # sum(self.locals["rewards"]), but that list holds only the reward of
        # the *current* step (one entry per vectorized env), not the episode
        # total. Accumulate step rewards and reset when the episode ends.
        self.current_episode_reward += float(self.locals["rewards"][0])

        if self.locals["dones"][0]:  # episode just finished
            episode_reward = self.current_episode_reward
            self.current_episode_reward = 0.0
            self.episode_rewards.append(episode_reward)
            self.epoch_counter += 1

            # Log the completed episode's total reward.
            print(f"Episode {len(self.episode_rewards)}: Reward: {episode_reward}")

            # Checkpoint every 20 episodes.
            if self.epoch_counter % 20 == 0:
                model_save_path = f"{self.save_path}/dqn_lunarlander_epoch_{self.epoch_counter}"
                self.model.save(model_save_path)
                print(f"Model saved at epoch {self.epoch_counter} to {model_save_path}")

        # Pause handling: first space press pauses; the next press resumes.
        if keyboard.is_pressed("space"):
            if not self.paused:
                print("Training paused. Press Space again to resume...")
                self.paused = True
                # Wait for the key to be released so a single (possibly held)
                # press does not immediately trigger the resume branch below.
                while keyboard.is_pressed("space"):
                    time.sleep(0.05)
                while True:
                    if keyboard.is_pressed("space"):  # second press: resume
                        while keyboard.is_pressed("space"):
                            time.sleep(0.05)
                        print("Training resumed.")
                        self.paused = False
                        break
                    time.sleep(0.05)  # avoid a 100% CPU busy-wait while paused

        return True  # return True to continue training


# --- Environment -------------------------------------------------------
env = gym.make("LunarLander-v3", render_mode="human")

# --- Model -------------------------------------------------------------
# Hyperparameters gathered in one dict so they are easy to tweak in one place.
dqn_kwargs = {
    "policy": "MlpPolicy",
    "env": env,
    "learning_rate": 1e-3,
    "buffer_size": 100000,
    "learning_starts": 1000,
    "batch_size": 128,
    "verbose": 1,
}
model = DQN(**dqn_kwargs)

# Directory where the callback writes periodic checkpoints.
save_path = "./saved_models/dqn/"

# To resume training or evaluate a saved checkpoint, load it like so:
# model = DQN.load("saved_models/dqn/dqn_lunarlander_epoch_100.zip", env=env)

# --- Training ----------------------------------------------------------
print("Starting training...")
model.learn(
    total_timesteps=500000,
    callback=CustomCallback(save_path=save_path),  # custom logging/checkpoint callback
)

# Persist the fully trained model.
model.save(f"{save_path}/dqn_lunarlander_final")

# --- Evaluation --------------------------------------------------------
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"Mean reward: {mean_reward}, Std reward: {std_reward}")

# Release the environment's resources (closes the render window).
env.close()
