import argparse
import os
import sys
import torch
import shutil
import numpy as np

def get_args():
    """Parse CLI arguments for multi-agent RL training/evaluation.

    Besides parsing, this function performs run setup:
      * seeds torch and numpy with ``--seed`` and sets numpy print options,
      * resolves CUDA availability into ``args.cuda`` / ``args.device``,
      * derives ``args.clipped_value_loss`` from ``--no_clipped_value_loss``,
      * rewrites ``save_dir``/``log_dir`` to live under ``./marlsave/save_new/``,
      * unless ``--test`` is given, deletes any existing save directory and
        recreates it empty.

    Returns:
        argparse.Namespace: parsed options plus the derived attributes above.

    Raises:
        ValueError: if ``--continue_training`` is given without a valid
            ``--load_dir``, or if ``--identity_size`` is nonzero but smaller
            than ``--num_agents``.
    """
    parser = argparse.ArgumentParser(description='RL')

    # environment
    parser.add_argument('--env_name', default='simple_formation', help='one from {simple_spread, simple_formation, simple_line}')
    parser.add_argument('--num_agents', type=int, default=9)
    parser.add_argument('--masking', action='store_true', help='restrict communication to within some threshold')
    parser.add_argument('--mask_dist', type=float, default=1.0, help='distance to restrict comms')
    parser.add_argument('--dropout_masking', action='store_true', help='dropout masking enabled')
    parser.add_argument('--entity_mp', action='store_true', help='enable entity message passing')
    parser.add_argument('--identity_size', default=0, type=int, help='size of identity vector')

    # training
    parser.add_argument('--seed', type=int, default=77, help='random seed (default: 77)')
    parser.add_argument('--num_processes', type=int, default=64, help='how many training CPU processes to use (default: 64)')
    parser.add_argument('--num_steps', type=int, default=128, help='number of forward steps in PPO (default: 128)')
    parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('--num_frames', type=int, default=int(50e6), help='number of frames to train (default: 50e6)')
    parser.add_argument('--arena_size', type=int, default=1, help='size of arena')

    # evaluation
    parser.add_argument('--num_eval_episodes', type=int, default=30, help='number of episodes to evaluate with')
    parser.add_argument('--dist_threshold', type=float, default=0.1, help='distance within landmark is considered covered (for simple_spread)')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--record_video', action='store_true', default=False, help='record evaluation video')

    # PPO
    parser.add_argument('--algo', default='ppo', help='algorithm to use: a2c | ppo | acktr')
    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate (default: 1e-4)')
    parser.add_argument('--gamma', type=float, default=0.99, help='discount factor for rewards (default: 0.99)')
    parser.add_argument('--tau', type=float, default=0.95, help='gae parameter (default: 0.95)')
    parser.add_argument('--entropy_coef', type=float, default=0.01, help='entropy term coefficient (default: 0.01)')
    parser.add_argument('--value_loss_coef', type=float, default=0.5, help='value loss coefficient (default: 0.5)')
    parser.add_argument('--max_grad_norm', type=float, default=0.5, help='max norm of gradients (default: 0.5)')
    parser.add_argument('--ppo_epoch', type=int, default=4, help='number of ppo epochs (default: 4)')
    parser.add_argument('--num_mini_batch', type=int, default=32, help='number of batches for ppo (default: 32)')
    parser.add_argument('--clip_param', type=float, default=0.2, help='ppo clip parameter (default: 0.2)')

    # logging
    parser.add_argument('--save_dir', default='tmp', help='directory to save models (default: tmp)')
    parser.add_argument('--log_dir', default='logs', help='directory to save logs')
    parser.add_argument('--save_interval', type=int, default=200, help='save interval, one save per n updates (default: 200)')
    parser.add_argument('--log_interval', type=int, default=0, help='log interval, one log per n updates (default: 0)')

    # Miscellaneous
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--load_dir', default=None, help='filename to load all policies from')
    parser.add_argument('--eval_interval', default=50, type=int)
    parser.add_argument('--continue_training', action='store_true')

    # we always set these to TRUE, so automating this
    parser.add_argument('--no_clipped_value_loss', action='store_true')

    args = parser.parse_args()

    # Global RNG / printing setup lives here so every entry point gets it.
    np.set_printoptions(suppress=True, precision=4)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    args.clipped_value_loss = not args.no_clipped_value_loss

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.device = torch.device("cuda" if args.cuda else "cpu")
    # All runs live under a fixed root; the log dir is nested inside save_dir.
    args.save_dir = './marlsave/save_new/' + args.save_dir
    args.log_dir = args.save_dir + '/' + args.log_dir

    # Explicit raises instead of asserts so validation survives `python -O`.
    if args.continue_training and (
            args.load_dir is None or not os.path.exists(args.load_dir)):
        raise ValueError(
            "Please specify valid model file to load if you want to continue training")

    if 0 < args.identity_size < args.num_agents:
        raise ValueError('identity size should either be 0 or >= number of agents!')

    if not args.masking:
        # No communication restriction at all.
        args.mask_dist = None
    elif args.dropout_masking:
        # NOTE(review): -10 looks like a sentinel that downstream consumers of
        # mask_dist interpret as "dropout masking" — confirm against the env/model.
        args.mask_dist = -10

    # Start each (non-test) training run from a clean save directory.
    if not args.test:
        if os.path.exists(args.save_dir):
            shutil.rmtree(args.save_dir)

        os.makedirs(args.save_dir)

    return args
