﻿#声明强化学习模型与环境的包
from stable_baselines3 import DQN
from stable_baselines3.common.callbacks import BaseCallback #回调函数，会用它来控制在训练过程中保存模型，记录实验数据
from stable_baselines3.common.evaluation import evaluate_policy #   定义最终评估函数
import gymnasium as gym

import numpy as np 
from collections import Counter
import matplotlib.pyplot as plt

#取消警告
import warnings
warnings.filterwarnings('ignore',category=UserWarning,message='Evaluation environment is not wrapped with a ``Monitor`` wrapper.')
import time


# Custom callback: records per-episode rewards, checkpoints the model every 20 episodes,
# and plots the reward curve when training ends (or is interrupted).
class CustomCallback(BaseCallback):
    """Track per-episode rewards, checkpoint the model periodically, and
    plot the reward curve at the end of training.

    :param save_path: directory where checkpoints and plots are written
    :param verbose: verbosity level forwarded to ``BaseCallback``
    """

    def __init__(self, save_path, verbose=0):
        super().__init__(verbose)
        self.episode_rewards = []          # total reward of each finished episode
        self.current_episode_reward = 0.0  # reward accumulated in the ongoing episode
        self.epoch_counter = 0             # number of completed episodes
        self.save_path = save_path         # directory for checkpoints / plots

    def on_training_end(self) -> None:
        """Save a "Total Reward vs Episode" plot to ``save_path``."""
        plt.figure()
        plt.plot(range(1, len(self.episode_rewards) + 1), self.episode_rewards, label="Total Reward")
        plt.xlabel("Episode")
        plt.ylabel("Total Reward")
        plt.title("Total Reward vs Episode")
        plt.legend()
        plt.savefig(f"{self.save_path}/reward_vs_episode.png")
        plt.close()
        print(f"Reward plot saved to {self.save_path}/reward_vs_episode.png")
        # FIX: original returned 0 despite the ``-> None`` annotation.

    def _on_step(self) -> bool:
        # FIX: ``self.locals['rewards']`` only holds the rewards of the
        # *current* step (one entry per parallel env); summing it on "done"
        # recorded just the final step's reward. Accumulate every step
        # instead (single-env assumption: index 0 of the vectorized arrays).
        self.current_episode_reward += float(self.locals["rewards"][0])

        if self.locals["dones"][0]:  # episode finished
            self.episode_rewards.append(self.current_episode_reward)
            self.current_episode_reward = 0.0
            # FIX: epoch_counter was never incremented, so the model was saved
            # after every single episode, always under the same filename.
            self.epoch_counter += 1

            # Checkpoint every 20 completed episodes.
            if self.epoch_counter % 20 == 0:
                self.model.save(f"{self.save_path}/dqn_lunarlander_{self.epoch_counter}")
                print(f"Model saved to {self.save_path}/dqn_lunarlander_{self.epoch_counter}")
        return True
    

def main():
    """Train a DQN agent on LunarLander-v3 with periodic checkpointing.

    On Ctrl+C the current model is saved, the reward curve is plotted, and
    the policy is evaluated before the environment is closed.
    """
    import os

    # Directory for saved models and experiment artifacts.
    save_path = "./save_path/dqn"
    os.makedirs(save_path, exist_ok=True)  # FIX: ensure the directory exists before saving

    # Single callback instance, shared between learn() and the interrupt handler.
    callback = CustomCallback(save_path=save_path)

    # "LunarLander-v2" in classic gym; gymnasium ships "LunarLander-v3".
    env = gym.make("LunarLander-v3", render_mode="human", max_episode_steps=50)

    # Build the DQN model.
    model = DQN(
        policy="MlpPolicy",
        env=env,
        learning_rate=0.001,
        buffer_size=100000,
        batch_size=64,
        verbose=1,  # print training progress
    )

    # To resume training or evaluate a checkpoint, load it instead:
    # https://stable-baselines3.readthedocs.io/en/master/
    # model = DQN.load("saved_models/dqn/dqn_lunarlander_epoch_300.zip", env=env)

    try:
        print("start training...")
        # FIX: the original passed a brand-new CustomCallback here, so the
        # ``callback`` used in the except-branch never received any episode
        # data and the interrupt-time reward plot was always empty.
        model.learn(total_timesteps=5000000, callback=callback)
    except KeyboardInterrupt:
        # Ctrl+C: save the model, plot the reward curve, then evaluate.
        print("KeyboardInterrupt!!!")
        model.save(f'{save_path}/dqn_lunarlander_final')
        callback.on_training_end()
        mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
        print(f"Mean reward: {mean_reward}, Standard deviation: {std_reward}")
    finally:
        # FIX: close the environment exactly once (the original closed it in
        # both the except- and finally-branches on interrupt).
        env.close()


# Script entry point: run training only when executed directly, not on import.
if __name__=="__main__":
    main()




