import argparse
from Modules import DQN, D3QN, TD3, PPO
from route import RouteEnv

def _str2bool(value):
    """Convert a boolean-ish command-line string to a real bool.

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is
    ``True`` because any non-empty string is truthy, so ``--load False``
    would still enable loading.  This converter accepts the usual
    spellings on both sides and keeps the ``--load VALUE`` call form.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % (value,))


# Command-line hyperparameters shared by all agents (DQN / D3QN / TD3 / PPO).
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='train', type=str)               # mode = 'train' or 'test'
parser.add_argument('--load', default=False, type=_str2bool)           # load a saved model, e.g. '--load True'

parser.add_argument('--learning_rate', default=1e-3, type=float)       # actor / main network learning rate
parser.add_argument('--critic_rate', default=1e-3, type=float)         # critic learning rate
parser.add_argument('--tau',  default=0.005, type=float)               # soft target-update coefficient
parser.add_argument('--epsilon', default=0.1, type=float)              # epsilon-greedy exploration rate
parser.add_argument('--gamma', default=0.9, type=float)                # discount factor
# NOTE: a default of 1e6 would be a *float* (argparse applies `type` only to
# command-line strings, never to defaults), so use an int literal.
parser.add_argument('--capacity', default=1000000, type=int)           # replay buffer capacity
parser.add_argument('--batch_size', default=256, type=int)

parser.add_argument('--log_interval', default=5000, type=int)          # save interval
parser.add_argument('--max_episode', default=10000, type=int)          # num of games
parser.add_argument('--test_iteration', default=10, type=int)          # test iteration
parser.add_argument('--train_iteration', default=10000, type=int)      # train iteration

# D3QN
parser.add_argument('--eps_dec',  default=5e-7, type=float)            # per-step epsilon decay
parser.add_argument('--eps_end',  default=0.01, type=float)            # epsilon floor
parser.add_argument('--fc1_dim',  default=256, type=int)               # first hidden-layer width
parser.add_argument('--fc2_dim',  default=256, type=int)               # second hidden-layer width

# TD3
parser.add_argument('--critic1_learning_rate', default=1e-3, type=float)
parser.add_argument('--critic2_learning_rate', default=1e-3, type=float)
parser.add_argument('--policy_noise', default=2, type=float)           # target policy smoothing noise
parser.add_argument('--noise_clip', default=5, type=float)             # clip range for policy noise
parser.add_argument('--policy_delay', default=2, type=int)             # delayed actor updates
parser.add_argument('--exploration_noise', default=5, type=float)

# PPO
parser.add_argument('--clip_param', default=0.2, type=float)           # PPO surrogate clipping epsilon
parser.add_argument('--max_grad_norm', default=0.5, type=float)        # gradient-norm clipping
parser.add_argument('--ppo_epoch', default=10, type=int)               # optimization epochs per rollout
args = parser.parse_args()


if __name__ == '__main__':
    env = RouteEnv()
    # Environment dimensions; assumes a gym-style observation/action space
    # with a 1-D observation vector and a discrete action set.
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    action_range = (env.action_low, env.action_high)

    # PPO hyperparameter overrides.  NOTE(review): these assignments
    # silently clobber any values supplied on the command line — confirm
    # that is intentional before exposing this script to other users.
    # (DQN / D3QN / TD3 agents are also available via
    # Modules.<ALGO>.NetWorkProxy with the same constructor signature.)
    args.learning_rate = 1e-3       # actor learning rate
    args.critic_rate = 4e-3         # critic learning rate
    args.capacity = 1000            # rollout buffer size
    args.batch_size = 8
    args.train_iteration = 100000
    args.clip_param = 0.2           # PPO surrogate clipping epsilon
    args.max_grad_norm = 0.5        # gradient-norm clipping
    args.gamma = 0.9                # discount factor
    args.exploration_noise = 25

    agent = PPO.NetWorkProxy(state_dim, action_dim, action_range, args)
    agent.game_loop(env)