import argparse
from gher.utils.launch_util import setup_logger
from gher.experiment.sac_experiment import sac_experiment

if __name__ == '__main__':
    # CLI entry point: parse arguments, assemble the experiment config
    # ("variant" dict), configure the logger, and launch the SAC experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='test_env', help='实验环境')
    parser.add_argument('--alg', type=str, default='SAC', help='实验的算法')
    parser.add_argument('--n_sampled_latents', type=int, default=5, help="sample的隐藏层数")
    parser.add_argument('--n_to_take', type=int, default=1, help="用于relabel的隐藏层应少于sample的")
    parser.add_argument('--relabel', action='store_true', help='是否有relabel')
    parser.add_argument('--use_advantages', '-use_adv', action='store_true', help='relabel带advantage')
    parser.add_argument('--irl', action='store_true', help='使用IRL来选择 relabeling latents')
    parser.add_argument('--plot', action='store_true', help='是否画轨迹图')
    parser.add_argument('--cache', action='store_true')
    parser.add_argument('--sparse', type=float, default=None)
    parser.add_argument('--ngradsteps', type=int, default=100)
    parser.add_argument('--nexpl', type=int, default=None)
    parser.add_argument('--horizon', type=int, default=None)
    parser.add_argument('--tau', type=float, default=5E-3)
    parser.add_argument('--lr', type=float, default=None)
    parser.add_argument('--buffer_size', type=int, default=None)
    parser.add_argument('--discount', type=float, default=None)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--reward_scale', type=float, default=None)
    parser.add_argument('--insert_time', action='store_true')
    parser.add_argument('--latent_shape_multiplier', type=int, default=1)
    parser.add_argument('--latent_to_all_layers', action='store_true')

    parser.add_argument('--seed', type=int, default=0, help="random seed")
    parser.add_argument('--n_experiments', '-n', type=int, default=1,
                        help="number of random seeds to use. If not -1, overrides seed ")

    # experiment name
    parser.add_argument('--exp_name', '-name', type=str, default=None)
    parser.add_argument('--extra', '-x', type=str, default=None)
    parser.add_argument('--test', '-test', action='store_true')
    # BUG FIX: help text previously read "number of latents to sample"
    # (copy-paste from the latents flags); --epochs is the training epoch count.
    parser.add_argument('--epochs', type=int, default=100, help="number of training epochs")
    # BUG FIX: the original combined default='true' with action='store_true',
    # making args.save_videos the truthy string 'true' even when the flag was
    # omitted, so videos could never be disabled. store_true defaults to False.
    parser.add_argument('--save_videos', action='store_true')

    args = parser.parse_args()

    # Random seeds: with n_experiments != -1, derive one seed per experiment
    # (10, 20, 30, ...); otherwise fall back to the single --seed value.
    # NOTE(review): `seeds` is never used below — presumably an outer loop over
    # seeds was intended; confirm before relying on --n_experiments.
    if args.n_experiments != -1:
        seeds = list(range(10, 10 + 10 * args.n_experiments, 10))
    else:
        seeds = [args.seed]

    # The number of latents used for relabeling must not exceed the number
    # sampled. Raise explicitly instead of `assert` (asserts vanish under -O).
    if args.n_to_take > args.n_sampled_latents:
        raise ValueError('--n_to_take must be <= --n_sampled_latents')

    # Default hyper-parameter configuration passed to the experiment launcher.
    variant = dict(
        seed=args.seed,
        algorithm=args.alg,
        env_name=args.env,
        algo_kwargs=dict(
            batch_size=256,
            num_epochs=args.epochs,
            num_eval_steps_per_epoch=50000,
            num_expl_steps_per_train_loop=75,
            num_trains_per_train_loop=args.ngradsteps,
            min_num_steps_before_training=1000,
            max_path_length=2250,
        ),
        trainer_kwargs=dict(
            discount=0.99,  # 0.99
            soft_target_tau=args.tau,
            target_update_period=1,
            policy_lr=3E-3,  # 3e-4
            qf_lr=3E-3,  # 3e-4
            reward_scale=1,
            use_automatic_entropy_tuning=True,
        ),
        replay_buffer_kwargs=dict(
            max_replay_buffer_size=100000,
            latent_dim=3,
            approx_irl=args.irl,
            plot=args.plot,
        ),
        relabeler_kwargs=dict(
            relabel=args.relabel,
            use_adv=args.use_advantages,
            n_sampled_latents=args.n_sampled_latents,
            n_to_take=args.n_to_take,
            cache=args.cache,
        ),
        qf_kwargs=dict(
            hidden_sizes=[300, 300, 300],
            latent_shape_multiplier=args.latent_shape_multiplier,
            latent_to_all_layers=args.latent_to_all_layers,
        ),
        policy_kwargs=dict(
            hidden_sizes=[300, 300, 300],
            latent_shape_multiplier=args.latent_shape_multiplier,
            latent_to_all_layers=args.latent_to_all_layers,
        ),
        path_collector_kwargs=dict(
            save_videos=args.save_videos
        ),
        use_advantages=args.use_advantages,
        proper_advantages=True,
        plot=args.plot,
        test=args.test,
        gpu=args.gpu,
    )

    # Snapshot every min(50, epochs-1) epochs, clamped to >= 1 so that
    # --epochs 1 does not produce a zero/negative snapshot gap.
    logger_kwargs = dict(snapshot_mode='gap_and_last',
                         snapshot_gap=min(50, max(1, args.epochs - 1)))

    # Per-environment overrides of the defaults. The two supported envs
    # previously had byte-identical branches; merged into one.
    if args.env in ('usv_env_default', 'test_env'):
        variant['relabeler_kwargs']['power'] = 1
        variant['env_kwargs'] = dict(horizon=variant['algo_kwargs']['max_path_length'])
        exp_postfix = ''
        variant['algo_kwargs']['batch_size'] = 128
        variant['qf_kwargs']['hidden_sizes'] = [400, 300]
        variant['policy_kwargs']['hidden_sizes'] = [400, 300]
    else:
        raise NotImplementedError

    # Optional user-supplied overrides of individual hyper-parameters.
    if args.nexpl is not None:
        variant['algo_kwargs']['num_expl_steps_per_train_loop'] = args.nexpl
    if args.discount is not None:
        # BUG FIX: originally read `args.discountZ` (typo), which raised
        # AttributeError whenever --discount was supplied.
        variant['trainer_kwargs']['discount'] = args.discount
    if args.lr is not None:
        variant['trainer_kwargs']['policy_lr'] = args.lr
        variant['trainer_kwargs']['qf_lr'] = args.lr
    if args.buffer_size is not None:
        variant['replay_buffer_kwargs']['max_replay_buffer_size'] = args.buffer_size
    if args.reward_scale is not None and args.reward_scale > 0:
        # A fixed reward scale disables automatic entropy tuning.
        variant['trainer_kwargs']['reward_scale'] = args.reward_scale
        variant['trainer_kwargs']['use_automatic_entropy_tuning'] = False

    # Experiment directory name: explicit --exp_name wins; otherwise build a
    # descriptive name from the key hyper-parameters.
    if args.exp_name is not None:
        exp_dir = args.exp_name
    else:
        exp_dir = 'gher-{}-{}-{}e-{}s-disc{}'.format(args.env,
                                                     variant['algorithm'],
                                                     str(args.epochs),
                                                     str(variant['algo_kwargs']['num_expl_steps_per_train_loop']),
                                                     str(variant['trainer_kwargs']['discount']))
        if len(exp_postfix) > 0:
            exp_dir += '-' + exp_postfix
    if args.extra is not None:
        exp_dir += '-' + args.extra
    if args.test:
        exp_dir += '-test'

    setup_logger(exp_dir, variant=variant, seed=variant['seed'], **logger_kwargs)

    # Dispatch to the requested algorithm (only SAC is implemented).
    if args.alg == 'SAC':
        sac_experiment(variant)
    else:
        raise NotImplementedError
