from mopo.models.constructor import construct_origin_model, construct_single_model, construct_shared_model, format_samples_for_training, load_env
import gym
import d4rl
import numpy as np
import tensorflow as tf
import mopo.mask
import os
import argparse
from RLA.easy_log.tester import tester

# Let TensorFlow allocate GPU memory on demand instead of grabbing it all upfront.
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

# Resolve <repo root>/experiments relative to the installed mopo package.
mopo_path = os.path.dirname(mopo.__file__)
root_path = os.path.dirname(mopo_path)
experiments_path = os.path.join(root_path, "experiments")


def load_model(env_name, network_structure, mask, model_save_dir, hidden_dim=100):
    """Construct a BNN dynamics model sized for `env_name` and restore it from disk.

    Args:
        env_name: gym/d4rl environment id; used only to read obs/action dims.
        network_structure: one of "origin", "single", "shared".
        mask: mask object forwarded to the masked constructors (ignored for "origin").
        model_save_dir: directory the constructor loads saved weights from.
        hidden_dim: hidden layer width passed through to the constructor.

    Returns:
        The constructed model object.

    Raises:
        NotImplementedError: if `network_structure` is not one of the known names.
    """
    # Fresh graph so repeated calls (e.g. train env then test env) don't collide.
    tf.reset_default_graph()
    env = gym.make(env_name)
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]

    # BUGFIX: the original compared strings with `is`, which tests object
    # identity and only works by accident of CPython small-string interning
    # (and emits a SyntaxWarning on modern Pythons). Use `==` for equality.
    if network_structure == "origin":
        model = construct_origin_model(obs_dim=obs_dim, act_dim=act_dim, name="BNN_0", load_dir=model_save_dir,
                                       hidden_dim=hidden_dim)
    elif network_structure == "single":
        model = construct_single_model(obs_dim=obs_dim, act_dim=act_dim, mask=mask,
                                       name="BNN_0", load_dir=model_save_dir, hidden_dim=hidden_dim)
    elif network_structure == "shared":
        model = construct_shared_model(obs_dim=obs_dim, act_dim=act_dim, mask=mask,
                                       name="BNN_0", load_dir=model_save_dir, hidden_dim=hidden_dim)
    else:
        raise NotImplementedError("unknown network_structure: {!r}".format(network_structure))

    return model


def cross_policy_evaluate(train_env_name, test_env_name,
                          network_structure, mask,
                          model_save_dir,
                          hidden_dim=100,
                          use_diff_predict=True,
                          normalize=False):
    """Evaluate a model trained on `train_env_name` on both its own data and
    the dataset of `test_env_name`, returning the elite-averaged losses.

    Returns:
        (train_final_losses, test_final_losses) as produced by
        `calculate_elite_losses`.
    """
    _, _, train_inputs, train_outputs = load_env(train_env_name, use_diff_predict, normalize)
    _, _, test_inputs, test_outputs = load_env(test_env_name, use_diff_predict, normalize)

    model = load_model(train_env_name, network_structure, mask, model_save_dir, hidden_dim)

    # Score each dataset with the same model, then reduce to elite averages.
    final_losses = []
    for inputs, outputs in ((train_inputs, train_outputs), (test_inputs, test_outputs)):
        raw_losses = model.get_losses(inputs, outputs, batch_size=20480, verbose=True)[0]
        final_losses.append(calculate_elite_losses(raw_losses))

    return tuple(final_losses)


def calculate_elite_losses(losses, num_elites=5):
    """Average the per-dimension losses of the `num_elites` best ensemble members.

    Args:
        losses: array-like of shape (num_models, num_dims) — per-model,
            per-dimension losses.
        num_elites: how many lowest-mean-loss models to keep.

    Returns:
        np.ndarray of shape (num_dims,): element-wise mean over the elite rows.
    """
    losses = np.asarray(losses)  # also accepts plain lists
    # Rank models by their mean loss across dimensions; lower is better.
    ensemble_loss = np.mean(losses, axis=1)
    elites = np.argsort(ensemble_loss)[:num_elites]
    # Vectorized replacement of the manual accumulate-then-divide loop.
    # NOTE: also fixes the edge case where fewer than `num_elites` models
    # exist — the original summed the available rows but still divided by
    # `num_elites`, deflating the average.
    return losses[elites].mean(axis=0)


def get_mask(base_env_name, mask):
    """Look up a predefined mask from `mopo.mask.masks`.

    The key is '<base_env_name>_<mask prefix>', where only the part of `mask`
    before the first '-' is used (e.g. 'dense-100' -> 'dense').
    """
    mask_prefix, _, _ = mask.partition("-")
    key = "{}_{}".format(base_env_name, mask_prefix)
    return mopo.mask.masks[key]


def format_vector(v):
    """Render a 1-D vector (or the first row of a 2-D array) as fixed-width text."""
    if v.ndim == 2:
        v = v[0]
    return " ".join("{:6.3f}".format(x) for x in v)


# def compare_two_structure(structure_name1, structure_name2, base_env_name):
#     structure1 = get_structure(structure_name1, base_env_name)
#     structure2 = get_structure(structure_name2, base_env_name)
#
#     base_env_name = env_name.split("-")[0]
#     hidden_dim = int(model_name.split("-")[-1])
#
#     if model_save_dir is None:
#         model_save_dir = os.path.join(experiments_path, base_env_name, env_name, model_name)
#
#     train_final_losses1, test_final_losses1 = test_policy_evaluate("{}-random-v0".format(base_env_name),
#                                                                       "{}-medium-v0".format(base_env_name),
#                                                                       structure_name1, structure1)
#     train_final_losses2, test_final_losses2 = test_policy_evaluate("{}-random-v0".format(base_env_name),
#                                                                       "{}-medium-v0".format(base_env_name),
#                                                                       structure_name2, structure2)
#     train_ratio = train_final_losses1 / train_final_losses2
#     test_ratio = test_final_losses1 / test_final_losses2
#
#     np.set_printoptions(precision=3, suppress=True)
#     print("train_ratio:\t", format_vector(train_ratio))
#     print("test_ratio:\t", format_vector(test_ratio))
#     return train_ratio, test_ratio
#
#
# def compare_predict_vs_groundtruth(train_env_name="particle-medium-v0",
#                                    test_env_name="particle-medium-replay-v0"):
#     from constructor import data_info
#     base_env_name = train_env_name.split("-")[0]
#     model_names = ["oracle-100", "dense-100"]
#     idx = 10300
#     obs_dim, act_dim, inputs, outputs = load_env(test_env_name, use_diff_predict=False, normalize=False)
#     sample_input, sample_ouput = inputs[idx][None], outputs[idx][None]
#     inputs_mean, inputs_std, outputs_mean, outputs_std = data_info[base_env_name].values()
#     result = []
#     for model_name in model_names:
#         model = load_model(train_env_name, model_name, get_structure(model_name, "particle"))
#         predict_output = model.predict((sample_input - inputs_mean) / inputs_std)[0] * outputs_std + outputs_mean
#         result.append([predict_output])
#     print(format("input:", " <15"), format_vector(sample_input))
#     print(format("output:", " <15"), format_vector(sample_ouput))
#     for i, model_name in enumerate(model_names):
#         print(model_name)
#         print(format("predict output:", " <15"), format_vector(result[i][0]))
#
#
if __name__ == "__main__":
    # compare_two_structure("dense-100", "oracle-100", "particle")
    # BUGFIX: arguments were swapped — get_mask expects (base_env_name, mask),
    # i.e. the environment ("hopper") first and the mask family ("dense")
    # second; the old call built the key "dense_hopper" instead of "hopper_dense".
    get_mask("hopper", "dense")
