import numpy as np
import tensorflow as tf
import pdb

from mopo.models.fc import FC
from mopo.models.bnn import BNN
from mopo.models.sparse_fc import SparseFC
from mopo.models.causal_bnn import CausalBNN
from mopo.models.shared_causal_bnn import SharedCausalBNN
from mopo.mask import masks


def construct_origin_model(obs_dim=11, act_dim=3, rew_dim=1, hidden_dim=200, num_networks=7,
                           num_elites=5, session=None, model_type='mlp', separate_mean_var=True,
                           name=None, load_dir=None, deterministic=False):
    """Construct and finalize a plain (non-causal) BNN dynamics model.

    The network maps (obs, act) -> (reward, next_obs-related output). Returns
    the finalized BNN, or None when model_type == 'identity' and no saved
    model was loaded from `load_dir`.
    """
    if name is None:
        name = 'BNN'
    print('[ BNN ] Name {} | Observation dim {} | Action dim: {} | Hidden dim: {}'.format(name, obs_dim, act_dim,
                                                                                          hidden_dim))
    params = {
        'name': name,
        'num_networks': num_networks,
        'num_elites': num_elites,
        'sess': session,
        'separate_mean_var': separate_mean_var,
        'deterministic': deterministic,
    }
    if load_dir is not None:
        print('Specified load dir', load_dir)
        params['model_dir'] = load_dir

    model = BNN(params)

    if not model.model_loaded:
        if model_type == 'identity':
            return
        if model_type == 'linear':
            print('[ BNN ] Training linear model')
            model.add(FC(obs_dim + rew_dim, input_dim=obs_dim + act_dim, weight_decay=0.000025))
        elif model_type == 'mlp':
            print('[ BNN ] Training non-linear model | Obs: {} | Act: {} | Rew: {}'.format(obs_dim, act_dim, rew_dim))
            # Only the first layer needs an explicit input width; later layers infer it.
            model.add(FC(hidden_dim, input_dim=obs_dim + act_dim, activation="swish", weight_decay=0.000025))
            for decay in (0.00005, 0.000075, 0.000075):
                model.add(FC(hidden_dim, activation="swish", weight_decay=decay))
            model.add(FC(obs_dim + rew_dim, weight_decay=0.0001))
            if separate_mean_var:
                # Extra head marked as the variance layer (var_layer=True).
                model.add(FC(obs_dim + rew_dim, input_dim=hidden_dim, weight_decay=0.0001), var_layer=True)

    if load_dir is not None:
        model.model_loaded = True

    model.finalize(tf.train.AdamOptimizer, {"learning_rate": 0.001})
    print('[ BNN ] Model: {}'.format(model))
    return model


def construct_single_model(obs_dim=11, act_dim=3, rew_dim=1, hidden_dim=200, num_networks=7,
                           num_elites=5, session=None, model_type='mlp', separate_mean_var=True,
                           name=None, load_dir=None, deterministic=False, mask=None,
                           mask_loss_coef=0.1):
    """Construct and finalize a CausalBNN with a single-unit output head.

    `mask` is a key into mopo.mask.masks and `mask_loss_coef` weights the
    mask term in the loss. Returns the finalized model, or None when
    model_type == 'identity' and no saved model was loaded.
    """
    if name is None:
        name = 'BNN'
    print('[ BNN ] Name {} | Observation dim {} | Action dim: {} | Hidden dim: {}'.format(name, obs_dim, act_dim,
                                                                                          hidden_dim))
    params = {
        'name': name,
        'num_networks': num_networks,  # num_networks is the size of the model ensemble
        'num_elites': num_elites,
        'sess': session,
        'separate_mean_var': separate_mean_var,
        'deterministic': deterministic,
        'mask': masks[mask],
        "mask_loss_coef": mask_loss_coef,
    }
    if load_dir is not None:
        print('Specified load dir', load_dir)
        params['model_dir'] = load_dir

    causal_model = CausalBNN(params, act_dim, obs_dim, rew_dim)

    if not causal_model.model_loaded:
        if model_type == 'identity':
            return
        if model_type == 'linear':
            print('[ BNN ] Training linear model')
            causal_model.add(SparseFC(obs_dim + rew_dim, input_dim=obs_dim + act_dim, weight_decay=0.000025))
        elif model_type == 'mlp':
            # The first layer must declare its input width; subsequent layers infer it.
            causal_model.add(
                SparseFC(hidden_dim, input_dim=obs_dim + act_dim, activation="swish", weight_decay=0.000025))
            causal_model.add(SparseFC(hidden_dim, activation="swish", weight_decay=0.00005))
            causal_model.add(SparseFC(hidden_dim, activation="swish", weight_decay=0.000075))
            causal_model.add(SparseFC(1, weight_decay=0.0001))
            if separate_mean_var:
                # Dedicated single-unit head flagged as the variance layer.
                causal_model.add(SparseFC(1, input_dim=hidden_dim, weight_decay=0.0001), var_layer=True)

    if load_dir is not None:
        causal_model.model_loaded = True

    causal_model.finalize(tf.train.AdamOptimizer, {"learning_rate": 0.001})
    print('[ BNN ] Model: {}'.format(causal_model))
    return causal_model


def construct_shared_model(obs_dim=11, act_dim=3, rew_dim=1, hidden_dim=200, num_networks=7,
                           num_elites=5, session=None, model_type='mlp', separate_mean_var=True,
                           name=None, load_dir=None, deterministic=False, mask=None,
                           mask_loss_coef=0.1):
    """Construct and finalize a SharedCausalBNN dynamics model.

    `mask` is a key into mopo.mask.masks and `mask_loss_coef` weights the
    mask term in the loss. Returns the finalized model, or None when
    model_type == 'identity' and no saved model was loaded.
    """
    if name is None:
        name = 'BNN'
    print('[ BNN ] Name {} | Observation dim {} | Action dim: {} | Hidden dim: {}'.format(name, obs_dim, act_dim,
                                                                                          hidden_dim))
    params = {
        'name': name,
        'num_networks': num_networks,  # num_networks is the size of the model ensemble
        'num_elites': num_elites,
        'sess': session,
        'separate_mean_var': separate_mean_var,
        'deterministic': deterministic,
        'mask': masks[mask],
        "mask_loss_coef": mask_loss_coef,
    }
    if load_dir is not None:
        print('Specified load dir', load_dir)
        params['model_dir'] = load_dir

    model = SharedCausalBNN(params, act_dim, obs_dim, rew_dim)

    if not model.model_loaded:
        if model_type == 'identity':
            return
        if model_type == 'linear':
            print('[ BNN ] Training linear model')
            # NOTE(review): the linear branch uses SparseFC while the mlp branch
            # below uses plain FC — confirm this asymmetry is intentional.
            model.add(SparseFC(obs_dim + rew_dim, input_dim=obs_dim + act_dim, weight_decay=0.000025))
        elif model_type == 'mlp':
            # The first layer must declare its input width; subsequent layers infer it.
            model.add(FC(hidden_dim, input_dim=obs_dim + act_dim, activation="swish", weight_decay=0.000025))
            for decay in (0.00005, 0.000075):
                model.add(FC(hidden_dim, activation="swish", weight_decay=decay))
            model.add(FC(obs_dim + rew_dim, weight_decay=0.0001))
            if separate_mean_var:
                # Extra head flagged as the variance layer.
                model.add(FC(obs_dim + rew_dim, input_dim=hidden_dim, weight_decay=0.0001), var_layer=True)

    if load_dir is not None:
        model.model_loaded = True

    model.finalize(tf.train.AdamOptimizer, {"learning_rate": 0.001})
    print('[ BNN ] Model: {}'.format(model))
    return model

def format_samples_for_training(samples, use_diff_predict=True):
    """Turn a batch of transitions into (inputs, targets) arrays for model fitting.

    Inputs are [obs, act] concatenated on the last axis; targets are
    [reward, next_obs - obs] when use_diff_predict, else [reward, next_obs].
    """
    obs = samples['observations']
    next_obs = samples['next_observations']

    inputs = np.concatenate((obs, samples['actions']), axis=-1)
    obs_target = next_obs - obs if use_diff_predict else next_obs
    outputs = np.concatenate((samples['rewards'], obs_target), axis=-1)
    return inputs, outputs


# Per-environment normalization statistics for model inputs ([obs, act]) and
# outputs ([rew, next_obs]); consumed by load_env() when normalize=True.
# Presumably produced offline by get_mean_std() below — verify before editing.
data_info = {"particle": {
    "inputs_mean": [2.43362, 1.74263, 0.57324, 0.577, 34.054, 36.40495, -11.50038],
    "inputs_std": [1.90924, 0.81155, 1.16403, 1.28664, 41.70129, 45.03598, 50.00444],
    "outputs_mean": [0.70208, 2.43362, 1.74263, 0.57324, 0.57701, 34.05412, 36.405],
    "outputs_std": [1.47081, 1.90925, 0.81155, 1.16403, 1.28664, 41.70139, 45.036]},
    "hopper": {
        "inputs_mean": [1.17185, 0.02741, -0.43839, -0.19319, 0.22063, 1.8764, -0.40415, 0.04117, -0.93514, -0.12475,
                        -0.53201, 0.06015, 0.16607, -0.10911],
        "inputs_std": [0.23107, 0.06548, 0.41216, 0.25275, 0.60946, 0.99427, 1.20899, 0.94513, 1.81075, 2.55713, 5.2216,
                       0.51421, 0.59591, 0.70486],
        "outputs_mean": [2.89105, 1.17185, 0.02741, -0.43839, -0.1932, 0.22063, 1.87643, -0.40415, 0.04117, -0.93514,
                         -0.12475, -0.53199],
        "outputs_std": [0.99026, 0.23108, 0.06548, 0.41216, 0.25275, 0.60947, 0.99426, 1.209, 0.94514, 1.81075, 2.55714,
                        5.22163]},
}


def load_env(env_name, use_diff_predict=True, normalize=True):
    """Load a d4rl dataset for `env_name` and format it for model training.

    Returns (obs_dim, act_dim, inputs, outputs); when `normalize`, inputs
    and outputs are standardized with the data_info statistics keyed by the
    environment's base name (text before the first '-').
    """
    import d4rl
    import gym

    env = gym.make(env_name)
    env_samples = d4rl.qlearning_dataset(env)
    env_samples['rewards'] = env_samples['rewards'].reshape([-1, 1])
    inputs, outputs = format_samples_for_training(env_samples, use_diff_predict=use_diff_predict)

    if normalize:
        stats = data_info[env_name.split("-")[0]]
        inputs = (inputs - stats["inputs_mean"]) / stats["inputs_std"]
        outputs = (outputs - stats["outputs_mean"]) / stats["outputs_std"]

    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    return obs_dim, act_dim, inputs, outputs


# def visualize_data(env_name, model=None):
#     import pandas as pd
#     from pandas_profiling import ProfileReport
#     obs_dim, act_dim, inputs, outputs = load_env(env_name, use_diff_predict=False, normalize=False)

#     df = pd.DataFrame(np.concatenate([inputs, outputs], axis=1))

#     columns = ["s_t_{}".format(i + 1) for i in range(obs_dim)] + \
#               ["action_{}".format(i + 1) for i in range(act_dim)] + \
#               ["reward"] + \
#               ["s_t'_{}".format(i + 1) for i in range(obs_dim)]
#     df.columns = columns

#     profile = ProfileReport(df, title="{} Report".format(env_name))
#     profile.to_file("{}_report.html".format(env_name))


def get_mean_std(env_names, use_diff_predict=False):
    """Print pooled per-dimension mean/std of inputs and outputs across datasets.

    Intended to regenerate the statistics stored in data_info; output order is
    inputs mean, inputs std, outputs mean, outputs std.
    """
    import d4rl
    import gym

    all_inputs, all_outputs = [], []
    for env_name in env_names:
        env = gym.make(env_name)
        env_samples = d4rl.qlearning_dataset(env)
        env_samples['rewards'] = env_samples['rewards'].reshape([-1, 1])
        inputs, outputs = format_samples_for_training(env_samples, use_diff_predict=use_diff_predict)
        all_inputs.append(inputs)
        all_outputs.append(outputs)

    pooled_inputs = np.concatenate(all_inputs)
    pooled_outputs = np.concatenate(all_outputs)
    print(pooled_inputs.mean(axis=0))
    print(pooled_inputs.std(axis=0))
    print(pooled_outputs.mean(axis=0))
    print(pooled_outputs.std(axis=0))


def reset_model(model):
    """Re-initialize all TF global variables under `model`'s name scope.

    Leaves the graph intact and re-runs the initializers of the model's
    variables in its session, restoring freshly-initialized weights.
    """
    model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=model.name)
    # BUG FIX: tf.initialize_vars does not exist (AttributeError at call time);
    # tf.variables_initializer is the TF1 op that initializes a variable list.
    model.sess.run(tf.variables_initializer(model_vars))


# if __name__ == '__main__':
    # casual_model = construct_casual_model()
    # get_mean_std(['hopper-medium-replay-v0', 'hopper-medium-v0', 'hopper-random-v0'], False)
    #
    # visualize_data('particle-medium-replay-v0')
    # visualize_data('particle-medium-v0')
    # visualize_data('particle-random-v0')
