import argparse
import numpy as np
from common.basic import create_env, init_seed, save_model
from algos.dis_redq import dis_redq
from algos.redq import redq
from algos.oneq_redq import oneq_redq
from algos.dredq import dredq
from algos.sredq import sredq
from common.log import logger, cal_mean_std
import torch

def main():
    """CLI entry point for REDQ-family experiments.

    Parses command-line arguments, then either aggregates logged runs into
    mean/std curves (``--meanstd``) or trains the agent selected by ``--alg``
    (one of: redq, dis_redq, oneq_redq, dredq, sredq) on the requested Gym
    environment, saving the model when training finishes.

    Raises:
        ValueError: if ``--alg`` names an unknown algorithm (previously this
            surfaced later as a confusing ``NameError`` on ``agent``).
    """
    parser = argparse.ArgumentParser()
    # NOTE: the original used np.str / np.float32 as argparse converters.
    # np.str was removed in NumPy 1.24 (AttributeError here on modern NumPy),
    # so the builtin str/float converters are used instead.
    parser.add_argument('--env', type = str, default = 'Hopper-v2', help = 'environment\'s name')
    parser.add_argument('--epochs', type = int, default = 100, help = 'the number of train epoch')
    parser.add_argument('--seed', '-s', type = int, default = 0, help = 'init seed')
    parser.add_argument('--Qnums', type = int, default = 10, help = 'ensemble Q number')
    parser.add_argument('-M', type = int, default = 2, help = 'in-target minimization parameter M')
    parser.add_argument('--alpha', type = float, default = 0.2, help = 'init alpha')
    parser.add_argument('--UTD', type = int, default = 20, help = 'UTD number')
    parser.add_argument('--qmode', type = str, default = "min", help = 'q target mode')
    parser.add_argument('--gamma', type = float, default = 0.99, help = 'reward discount')
    parser.add_argument('--tau', type = float, default = 0.995, help = 'target smoothing coefficient')
    parser.add_argument('--dir', type = str, default = None, help = 'dir used to store logger')
    parser.add_argument('--policynum', type = int, default = 1, help = 'policy update num')
    parser.add_argument('--cuda', type = int, default = 0, help = 'cuda id')
    parser.add_argument('--alg', type = str, default = "redq", help = 'algorithm name')
    parser.add_argument('--lambd', type = int, default = 0, help = 'q_std coefficient')
    parser.add_argument('--label', type = str, default = "test", help = 'label to identify experiment')
    parser.add_argument('--testnum', type = int, default = 1, help = 'test num')
    parser.add_argument('--lr', type = float, default = 3e-4, help = 'lr')
    parser.add_argument('--batchsize', type = int, default = 256, help = 'batch size')
    parser.add_argument('--usebn', action = 'store_true', default = False, help = 'use batch normalization or not')
    parser.add_argument('--meanstd', action = 'store_true', default = False, help = 'plot mean and std curves')
    args = parser.parse_args()

    if args.meanstd:
        # Post-processing mode only: aggregate previously logged runs.
        cal_mean_std(args)
        return

    # Fall back to CPU when the requested CUDA device is unavailable.
    device = torch.device("cuda:" + str(args.cuda) if torch.cuda.is_available() else "cpu")

    log = logger(args)
    init_seed(args.seed)
    # Offset the test-env seed so evaluation episodes differ from training ones.
    env = create_env(args.env, args.seed)
    test_env = create_env(args.env, args.seed + 1000)

    # Keyword arguments shared by every algorithm constructor.
    common_kwargs = dict(
        logger = log,
        epochs_num = args.epochs,
        Qnums = args.Qnums,
        Q_update_num = args.UTD,
        Policy_update_num = args.policynum,
        test_num = args.testnum,
    )
    if args.alg == "redq":
        agent = redq(args.env, env, test_env, device, lr = args.lr,
                     use_bn = args.usebn, batch_size = args.batchsize, **common_kwargs)
    elif args.alg == "dis_redq":
        agent = dis_redq(args.env, env, test_env, device, lam = args.lambd, **common_kwargs)
    elif args.alg == "oneq_redq":
        agent = oneq_redq(args.env, env, test_env, device, lam = args.lambd, **common_kwargs)
    elif args.alg == "dredq":
        agent = dredq(args.env, env, test_env, device, **common_kwargs)
    elif args.alg == "sredq":
        agent = sredq(args.env, env, test_env, device, **common_kwargs)
    else:
        # Fail fast with a clear message instead of a NameError at agent.train().
        raise ValueError("unknown algorithm: %r" % args.alg)

    agent.train()
    log.close()
    save_model(agent, args)


# Run the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()


