from mopo.models.constructor import construct_single_model, load_env
from mopo.models.cross_policy_evaluate import calculate_elite_losses
import gym
import d4rl
import numpy as np
import tensorflow as tf
import numpy as np
import mopo.mask
import argparse
import json
import os

# Candidate D4RL hopper datasets used as train/test pairs for the
# cross-environment feature-selection experiments below.
env_names = ["hopper-medium-replay-v0", "hopper-medium-v0", "hopper-random-v0"]

# Clear any TF1 graph left over from a previous run before building models.
tf.reset_default_graph()

def load_json(train_env_name, test_env_name, save_dir="tmp"):
    """Load the cached per-output-dimension mask->loss dictionaries.

    Looks for ``{train}_{test}_mask.json`` under *save_dir* and returns the
    parsed list of dicts, or ``None`` when no cache file exists yet.
    """
    cache_path = os.path.join(
        save_dir, "{}_{}_mask.json".format(train_env_name, test_env_name))
    if not os.path.exists(cache_path):
        return None
    with open(cache_path, "r") as fp:
        return json.loads(fp.read())

def feature_selection(train_env_name, test_env_name, hidden_dim=100, save_dir="/home/amax/thl/mopo/mopo/models/tmp"):
    """Greedily search input masks for a dynamics model trained on
    *train_env_name* and evaluated on *test_env_name*.

    Repeatedly asks choose_mask() for the next unevaluated candidate mask,
    trains/tests a model under it, records the losses, and stops once every
    greedy path is fully evaluated (choose_mask returns an all-zero mask).
    The accumulated mask->loss cache is persisted to JSON in *save_dir*.
    """
    os.makedirs(save_dir, exist_ok=True)
    env = gym.make(train_env_name)
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    # BUG FIX: load_json takes (train_env_name, test_env_name, save_dir);
    # obs_dim was previously passed as a 3rd positional (landing in save_dir)
    # with save_dir as a 4th -> TypeError on every call.
    mask_loss_dicts = load_json(train_env_name, test_env_name, save_dir)
    if mask_loss_dicts is None:
        # One dict per predicted output dimension (obs deltas + reward).
        mask_loss_dicts = [{} for _ in range(obs_dim + 1)]

    mask = choose_mask(mask_loss_dicts, obs_dim, act_dim)
    # All-zero mask signals that every greedy path has been evaluated.
    while not (mask == 0).all():
        losses = train_and_test(train_env_name, test_env_name, mask, hidden_dim)
        mask_loss_dicts = register_loss(mask_loss_dicts, mask, losses)
        mask = choose_mask(mask_loss_dicts, obs_dim, act_dim)

    json_mask_loss_dicts = json.dumps(mask_loss_dicts, indent=4)
    with open(os.path.join(save_dir, "{}_{}_mask.json".format(train_env_name, test_env_name)), "w") as f:
        f.write(json_mask_loss_dicts)

def choose_mask(mask_loss_dicts, obs_dim, act_dim):
    """Pick the next input mask to evaluate for every output dimension.

    For each output dimension the search walks the input dimensions greedily:
    at step d it compares the recorded losses of keeping vs. dropping input d
    (all later inputs still kept). The first candidate whose loss has not been
    recorded yet becomes that row of the returned mask. A row left all-zero
    means its greedy path is fully evaluated.

    Returns an int8 array of shape (len(mask_loss_dicts), obs_dim + act_dim).
    """
    n_inputs = obs_dim + act_dim
    # Generalized from the hopper-specific hard-coded shape [12, 14]:
    # one row per output dimension, one column per (obs + act) input.
    mask = np.zeros([len(mask_loss_dicts), n_inputs], dtype=np.int8)
    for out_dim, mask_loss_dict in enumerate(mask_loss_dicts):
        best_greedy_choose = []
        for in_dim in range(n_inputs):
            # Both candidates keep every not-yet-decided later input.
            tail = [1] * (n_inputs - in_dim - 1)
            keep_mask = best_greedy_choose + [1] + tail
            drop_mask = best_greedy_choose + [0] + tail
            keep_key = " ".join(map(str, keep_mask))
            drop_key = " ".join(map(str, drop_mask))
            if keep_key not in mask_loss_dict:
                mask[out_dim] = np.array(keep_mask, dtype=np.int8)
                break
            if drop_key not in mask_loss_dict:
                mask[out_dim] = np.array(drop_mask, dtype=np.int8)
                break
            # Both losses known: keep input in_dim only if keeping is better.
            if mask_loss_dict[keep_key] < mask_loss_dict[drop_key]:
                best_greedy_choose.append(1)
            else:
                best_greedy_choose.append(0)
    return mask


def register_loss(mask_loss_dicts, mask, losses):
    """Record, for every output dimension, the loss measured under its mask row.

    The mask row is serialized as a space-joined string so it can serve as a
    JSON-compatible dictionary key. Mutates and returns *mask_loss_dicts*.
    """
    for idx, loss_dict in enumerate(mask_loss_dicts):
        key = " ".join(str(bit) for bit in mask[idx])
        loss_dict[key] = losses[idx]
    return mask_loss_dicts


def train_and_test(train_env_name, test_env_name, mask, hidden_dim=100):
    """Train a masked dynamics model on one dataset and score it on another.

    Builds a single model whose network structure is gated by *mask*, fits it
    on *train_env_name*'s data, then evaluates per-output losses on
    *test_env_name*'s data and reduces them via calculate_elite_losses().
    """
    # Drop the previous iteration's TF1 graph so each model starts fresh.
    tf.reset_default_graph()
    obs_dim, act_dim, fit_inputs, fit_outputs = load_env(
        train_env_name, use_diff_predict=True, normalize=False)
    dynamics_model = construct_single_model(obs_dim=obs_dim, act_dim=act_dim,
                                            network_structure=mask,
                                            hidden_dim=hidden_dim)
    dynamics_model.train(fit_inputs, fit_outputs, batch_size=256, holdout_ratio=0.2)
    _, _, eval_inputs, eval_outputs = load_env(
        test_env_name, use_diff_predict=True, normalize=False)

    raw_losses = dynamics_model.get_losses(
        eval_inputs, eval_outputs, batch_size=20480, verbose=True)[0]
    return calculate_elite_losses(raw_losses)

def get_best_mask(train_env_name, test_env_name, save_dir="/home/amax/thl/mopo/mopo/models/tmp"):
    """Print and return the lowest-loss mask found so far for an env pair.

    For each output dimension, selects the cached mask key with the minimum
    recorded loss, then stacks the rows and transposes so the result is
    (inputs x outputs). Returns the int array, or None (with a message)
    when no cache file exists yet.
    """
    mask_loss_dicts = load_json(train_env_name, test_env_name, save_dir)
    if mask_loss_dicts is None:
        # Previously this crashed with a TypeError when the cache was absent.
        print("no mask cache found for {} -> {}".format(train_env_name, test_env_name))
        return None
    best_mask = []
    for loss_dict in mask_loss_dicts:
        best_key = min(loss_dict.items(), key=lambda item: item[1])[0]
        best_mask.append([int(bit) for bit in best_key.split()])
    best_mask = np.array(best_mask).transpose()
    print(best_mask)
    return best_mask



if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Select feature of causal model.')
    # nargs='?' makes the defaults effective: argparse ignores `default` on a
    # required positional argument, so previously omitting the env names was
    # an error despite the declared defaults.
    parser.add_argument('train_env_name', type=str, nargs='?', default="hopper-random-v0")
    parser.add_argument('test_env_name', type=str, nargs='?', default="hopper-medium-replay-v0")
    # NOTE(review): --hidden_dim is parsed but unused here; it only matters
    # for feature_selection(), which this entry point does not call.
    parser.add_argument('--hidden_dim', type=int, default=100)
    args = parser.parse_args()
    get_best_mask(args.train_env_name, args.test_env_name)
