from mopo.models.constructor import construct_origin_model, construct_single_model, format_samples_for_training, load_env
import gym
import d4rl
import numpy as np
import tensorflow as tf
import mopo.mask
import os
import argparse
from RLA.easy_log.tester import tester
import random

# Let TensorFlow allocate GPU memory on demand instead of reserving it all up front.
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

# Repository layout: <root>/mopo is the package, <root>/experiments holds outputs.
mopo_path = os.path.split(mopo.__file__)[0]
root_path = os.path.split(mopo_path)[0]
experiments_path = os.path.join(root_path, "experiments")
# D4RL dataset variants considered when building cross-dataset evaluation sets.
d4rl_dataset_names = ["medium-replay", "medium", "random"]


def boolean_flag(parser, name, default=False, help=None):
    """Register a --name / --no-name switch pair on `parser`.

    `--name` stores True and `--no-name` stores False; both write to the
    destination derived from `name` by replacing '-' with '_'.
    """
    dest = name.replace('-', '_')
    parser.add_argument('--{}'.format(name), action='store_true',
                        default=default, dest=dest, help=help)
    parser.add_argument('--no-{}'.format(name), action='store_false', dest=dest)


def argsparser():
    """Build and parse the command-line arguments for model training.

    Example:
        python train_model.py --task break_as_mopo \
            --env_name particle-medium-v0 --structure_name dense --hidden_dim 100

    Returns:
        argparse.Namespace with the parsed hyper-parameters.
    """
    # Default seed is randomized per run so repeated launches differ unless
    # --seed is passed explicitly.
    seed = random.randint(0, 9999)

    parser = argparse.ArgumentParser("Train coupon policy in simulator")
    # Bug fix: help text previously said 'environment ID' (copy-paste from
    # --env_name); --info is a free-form experiment tag recorded by RLA.
    parser.add_argument('--info', help='experiment info tag', default='default_info')
    parser.add_argument('--task', type=str, default='default_task')
    parser.add_argument('--env_name', type=str, default='particle-medium-replay-v0')
    parser.add_argument('--structure_name', type=str, default='dense')
    parser.add_argument('--seed', type=int, default=seed)
    parser.add_argument('--resample_type', type=str, default="without")
    boolean_flag(parser, 'inherit_hp', default=False)
    parser.add_argument('--hidden_dim', type=int, default=100)
    args = parser.parse_args()
    return args


def load_cross_dataset(env_name, use_diff_predict=False):
    """Load the sibling D4RL datasets of `env_name`'s base environment.

    Given e.g. "particle-medium-replay-v0", loads every dataset variant in
    `d4rl_dataset_names` except "medium-replay" for the same base env.

    Returns:
        dict mapping dataset-variant name -> {"inputs": ..., "targets": ...}.
    """
    base_env_name, _, remainder = env_name.partition("-")
    # Drop the trailing "-v0" suffix to recover the dataset-variant name.
    this_dataset_name = remainder[:-3]

    cross_env_dataset = {}
    for variant in set(d4rl_dataset_names) - {this_dataset_name}:
        cross_env_name = "{}-{}-v0".format(base_env_name, variant)
        _, _, inputs, targets = load_env(cross_env_name, use_diff_predict=use_diff_predict)
        cross_env_dataset[variant] = {"inputs": inputs, "targets": targets}

    return cross_env_dataset


def train_model(env_name, network_structure, model_name, hidden_dim=200, seed=10086,
                model_save_dir=None, resample_type='sqrt'):
    """Train a dynamics model on the dataset of `env_name` and save it to disk.

    Args:
        env_name: full D4RL environment name, e.g. "particle-medium-replay-v0".
        network_structure: sparse structure spec, or None to train the
            original (dense) MOPO model instead.
        model_name: label used in the default save-directory name.
        hidden_dim: hidden-layer width of the model.
        seed: RNG seed forwarded to model.train.
        model_save_dir: output directory; defaults to
            experiments/<base_env>/<env_name>/<model_name>-<hidden_dim>.
        resample_type: resampling scheme (only used for structured models).

    Returns:
        The metrics dict returned by model.train (contains 'val_loss').
    """
    tf.reset_default_graph()
    base_env_name = env_name.split("-")[0]

    obs_dim, act_dim, train_inputs, train_outputs = load_env(env_name, use_diff_predict=False)
    cross_env_dataset = load_cross_dataset(env_name, use_diff_predict=False)

    # Reset again in case the data-loading calls touched the default graph.
    tf.reset_default_graph()
    gpu_options = tf.GPUOptions(allow_growth=True)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    try:
        if network_structure is None:
            model = construct_origin_model(obs_dim=obs_dim, act_dim=act_dim, hidden_dim=hidden_dim, session=session)
            model_metrics = model.train(train_inputs, train_outputs, batch_size=256, holdout_ratio=0.2, rla=True,
                                        seed=seed, cross_env_dataset=cross_env_dataset)
        else:
            model = construct_single_model(obs_dim=obs_dim, act_dim=act_dim, network_structure=network_structure,
                                           hidden_dim=hidden_dim, session=session)
            model_metrics = model.train(train_inputs, train_outputs, batch_size=256, holdout_ratio=0.2, rla=True,
                                        seed=seed, cross_env_dataset=cross_env_dataset,
                                        resample_type=resample_type)

        if model_save_dir is None:
            model_save_dir = os.path.join(experiments_path, base_env_name, env_name,
                                          "{}-{}".format(model_name, hidden_dim))
        os.makedirs(model_save_dir, exist_ok=True)
        model.save(model_save_dir, timestep=0)

        with open(os.path.join(model_save_dir, "var_loss.txt"), "w") as f:
            f.write(str(model_metrics['val_loss']) + "\n")
        return model_metrics
    finally:
        # Bug fix: the session was previously never closed, leaking GPU memory
        # and graph state when train_model is called repeatedly in one process.
        session.close()


def get_structure(structure_name, base_env_name):
    """Resolve a registered network structure by name.

    Only the part of `structure_name` before the first '-' is used as the
    lookup key. "none" means the dense (unstructured) model and yields None.
    """
    key = structure_name.partition("-")[0]
    if key == "none":
        return None
    return mopo.network_structure.structures["{}_{}".format(base_env_name, key)]


def main():
    """Parse CLI args, configure RLA experiment logging, and train the model."""
    args = argsparser()
    # Flatten the namespace into a dict for the RLA tracker.
    kwargs = vars(args)

    # [RLA] register hyper-parameters with the experiment tracker.
    tester.set_hyper_param(**kwargs)
    tester.update_hyper_param('env_name', args.env_name)
    tester.update_hyper_param('structure_name', args.structure_name)
    # These params become part of the experiment's recorded identity/log name.
    tester.clear_record_param()
    tester.add_record_param(['info', 'env_name', 'structure_name', 'hidden_dim', 'seed', 'resample_type'])

    # NOTE(review): run_file is 'cross_policy_evaluate.py' although this script
    # trains models — confirm this is the intended file to snapshot.
    tester.configure(task_name=args.task,
                     private_config_path=os.path.join(root_path, 'rla_config_mopo.yaml'),
                     run_file='cross_policy_evaluate.py', log_root=root_path)
    tester.log_files_gen()
    tester.print_args()
    # [RLA] --- complete configuration ---

    # [RLA] (optional) load an older experiment and resume training/ evaluation.
    # from RLA.easy_log.tester import experimental_loader
    # experimental_loader.config(task_name=args.load_task_name, record_date=args.load_date,
    #                            root=get_package_path(), inherit_hp=args.inherit_hp)
    # if args.load_date is not None and args.inherit_hp:
    #     load_args = experimental_loader.import_hyper_parameters()

    # Resolve the requested structure (None for dense) and train, saving the
    # model into this run's RLA log directory.
    structure = get_structure(args.structure_name, args.env_name.split("-")[0])
    train_model(args.env_name, structure, args.structure_name, hidden_dim=args.hidden_dim, seed=args.seed,
                model_save_dir=tester.log_dir, resample_type=args.resample_type)


if __name__ == '__main__':
    main()
