import torch

from buffer.ReplayBuffer import ReplayBuffer
from train import Trainer
import argparse
import gym
from agent.AgentDQN import DQN
from agent.AgentDoubleDQN import DoubleDQN
from agent.AgentD3QN import D3QN

if __name__ == '__main__':
    # Entry point: sweeps over environments and sampling strategies,
    # training a DoubleDQN agent for each combination.
    parser = argparse.ArgumentParser()
    parser.add_argument("--env_name", type=str, default="CartPole-v1")
    parser.add_argument("--agent_name", type=str)
    parser.add_argument("--learning_rate", type=float, default=3e-5)
    parser.add_argument("--buffer_size", type=int, default=2**17)
    parser.add_argument("--internal_log", type=int, default=50, help="每隔多少个episode打印日志")
    parser.add_argument("--evaluate_internal", type=int, default=10, help="每隔多少个episode评估一下模型")
    parser.add_argument("--record_path_pre", type=str, default="logs/", help="存储tensorboard日志文件的地方")
    parser.add_argument("--agent_type", type=str, default="offPolicy", help="Agent类型")
    parser.add_argument("--minimal_size", type=int, default=1000, help="offPolicy下开始更新时，buffer的最小容量")
    parser.add_argument("--model_path_pre", type=str, default="models/", help="模型保存位置以及名字")
    parser.add_argument("--sampling_strategy", type=str, default="boltzmann", help="使用什么采样策略，部分贪婪还是玻尔兹曼")
    # FIX: argparse applies `type` only to command-line strings, so a
    # `default=1e6` would silently remain a float; use an int literal.
    parser.add_argument("--maxCount", type=int, default=1_000_000, help="使用指数衰减,衰减到固定值所需步长")
    parser.add_argument("--epsilon", type=float, default=0.2)
    parser.add_argument("--temperature", type=float, default=5, help="玻尔兹曼分布温度系数")
    parser.add_argument("--tau", type=float, default=0.005, help="软更新参数")
    parser.add_argument("--num_episodes", type=int, default=500)
    args = parser.parse_args()

    env_names = ['CartPole-v0']
    for env_name in env_names:
        # Train once with epsilon-greedy and once with Boltzmann sampling.
        for st in ['epsilon', 'boltzmann']:
            args.sampling_strategy = st
            args.agent_name = "DoubleDQN"
            # Derived per-run paths for model checkpoints and tensorboard logs.
            args.model_path = args.model_path_pre + args.agent_name + "_" + env_name + "_" + args.sampling_strategy + ".pt"
            args.record_path = args.record_path_pre + args.agent_name + "_" + env_name + "_" + args.sampling_strategy
            args.env_name = env_name
            args.batch_size = 128

            env = gym.make(env_name)
            state_dim = env.observation_space.shape[0]
            action_dim = env.action_space.n
            hidden_dim = 128
            hidden_layers = 2
            gamma = 0.99
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            args.device = device
            replay_buffer = ReplayBuffer(args.buffer_size)

            # Alternative agents (same constructor signature):
            # agent = DQN(state_dim, hidden_dim, action_dim, hidden_layers, args.learning_rate, gamma, device, args=args)
            # agent = D3QN(state_dim, hidden_dim, action_dim, hidden_layers, args.learning_rate, gamma, device, args=args)
            # FIX: was `args.lr`, which does not exist on the Namespace
            # (the parser defines `--learning_rate`) and raised AttributeError.
            agent = DoubleDQN(state_dim, hidden_dim, action_dim, hidden_layers, args.learning_rate, gamma, device, args=args)
            trainer = Trainer(env, agent, replay_buffer, args)
            trainer.train()
