from SaveEveryNTimesCallBack import *

from stable_baselines3 import DQN
import torch as th
import gym

# Module-level environment shared by learn()/predict() via main().
# The code below unpacks env.step() into 4 values, i.e. the legacy gym API.
env = gym.make("LunarLander-v2")

# Training function
def learn(env):
    """Train a DQN agent on *env*, checkpointing periodically.

    Saves intermediate checkpoints via SaveCallback under
    ``./save_model_<run tag>`` and writes the final model to
    ``LunarLanderDQN``.
    """
    # DQN hyperparameters
    dqn_kwargs = {
        'learning_rate': 0.0001,
        'buffer_size': 30000,
        'exploration_initial_eps': 1,
        'exploration_fraction': 0.5,
        'exploration_final_eps': 0.05,
        'tensorboard_log': './log/',
    }

    # Total number of environment steps to train for
    total_timesteps = 2500000

    # Run tag used to label this run's checkpoints and TensorBoard logs
    run_name = 'net1024_1M5_ep5e-1_rl1e-4_buffersize3e4'

    # Callback that saves the model at regular step intervals
    checkpoint_cb = SaveCallback(
        total_timesteps=total_timesteps,
        times=100000,
        printTimes=1000,
        save_path='./save_model_' + run_name,
    )

    # Custom MLP: two fully connected layers of 1024 units, ReLU activations
    policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=[1024, 1024])

    # Build the model
    model = DQN(
        "MlpPolicy",
        env,
        verbose=1,
        policy_kwargs=policy_kwargs,
        **dqn_kwargs,
    )

    # Train
    model.learn(
        log_interval=4,
        callback=checkpoint_cb,
        tb_log_name=run_name,
        total_timesteps=total_timesteps,
    )

    # Save the final model
    model.save("LunarLanderDQN")

# Prediction / evaluation
def predict(env, model_path="./save_model_net1024_2M5_ep5e-1_rl1e-3_buffersize2e6/model_2500000.zip"):
    """Run a trained DQN policy in *env* forever, rendering each step.

    Parameters
    ----------
    env : gym.Env
        Environment to roll out in (legacy 4-tuple ``step`` API).
    model_path : str
        Path of the saved DQN checkpoint to load. Defaults to the previously
        hard-coded path; NOTE(review): this does not match the run tag that
        learn() saves under ('net1024_1M5_ep5e-1_rl1e-4_buffersize3e4') —
        confirm which checkpoint is intended.

    Notes
    -----
    This function never returns: it resets the environment and continues
    whenever an episode ends.
    """
    model = DQN.load(model_path)

    obs = env.reset()
    while True:
        # deterministic=True evaluates the greedy policy; without it DQN's
        # predict() keeps applying epsilon-greedy exploration, which is not
        # what we want when demonstrating a trained agent.
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        env.render()
        if done:
            obs = env.reset()

def main():
    """Train a DQN agent on LunarLander-v2, then run the trained policy.

    NOTE: predict() loops forever rendering episodes, so it only runs after
    the full training in learn() has completed, and main() never returns.
    """
    learn(env)
    predict(env)


# Guard the entry point so that importing this module does not immediately
# kick off a 2.5M-step training run.
if __name__ == "__main__":
    main()