{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pickle\n",
    "from copy import deepcopy\n",
    "import itertools\n",
    "from collections import OrderedDict\n",
    "import datetime\n",
    "import pandas as pd\n",
    "import pprint as pp\n",
    "import scipy\n",
    "from sklearn.cluster import KMeans\n",
    "import sympy\n",
    "from sympy import Symbol\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "import sys, os\n",
    "sys.path.append(os.path.join(os.path.dirname(\"__file__\"), '..', '..'))\n",
    "from AI_physicist.theory_learning.theory_model import Theory_Training, get_loss, load_info_dict, get_best_model_idx\n",
    "from AI_physicist.theory_learning.models import Loss_Fun_Cumu, get_Lagrangian_loss\n",
    "from AI_physicist.theory_learning.util_theory import plot_theories, plot3D, plot_indi_domain, to_one_hot, get_piecewise_dataset\n",
    "from AI_physicist.theory_learning.models import Statistics_Net, Generative_Net\n",
    "from AI_physicist.settings.filepath import theory_PATH\n",
    "from AI_physicist.pytorch_net.util import Loss_Fun, Loss_with_uncertainty, Batch_Generator, get_criterion, to_np_array, make_dir, Early_Stopping\n",
    "from AI_physicist.pytorch_net.util import record_data, plot_matrices, get_args, sort_two_lists, get_param_name_list\n",
    "from AI_physicist.pytorch_net.net import MLP, Model_Ensemble, combine_model_ensembles, load_model_dict, load_model_dict_net, load_model_dict_model_ensemble, construct_model_ensemble_from_nets"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Helper functions for symbolic unification:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "def unification_symbolic(\n",
    "    theory_collection,\n",
    "    num_clusters,\n",
    "    fraction_threshold=1,\n",
    "    relative_diff_threshold=0,\n",
    "    verbose=True,\n",
    "    ):\n",
    "    \"\"\"Unification of symbolic theories, implementing the Alg. 4 in Wu and Tegmark (2019).\n",
    "\n",
    "    Args:\n",
    "        theory_collection: dict mapping theory name -> theory; only theories whose pred_net's\n",
    "            first layer is a 'Symbolic_Layer' are unified (others are skipped).\n",
    "        num_clusters: number of KMeans clusters used to group expressions by description length (DL).\n",
    "        fraction_threshold: minimum fraction of agreeing constants for a number to stay shared\n",
    "            (forwarded to joint_traverse / check_same_number).\n",
    "        relative_diff_threshold: relative tolerance when comparing numerical constants (forwarded).\n",
    "        verbose: if True, pretty-print the unified expressions and display the summary table.\n",
    "\n",
    "    Returns:\n",
    "        (df_sympy, exprs_unified_list): a DataFrame summarizing all symbolic theories, and one list of\n",
    "        unified expressions per cluster, in which differing constants are replaced by symbols p0, p1, ...\n",
    "    \"\"\"\n",
    "    df_dict_list = []\n",
    "    skeleton_dict = {}  # maps skeleton_id -> skeleton_list, used to deduplicate skeleton structures\n",
    "    for key, theory in theory_collection.items():\n",
    "        # Record all different kinds of skeletons\n",
    "        if theory.pred_net.layer_0.__class__.__name__ == \"Symbolic_Layer\": # Only unify models with Symbolic_Layer\n",
    "            df_dict = theory.pred_net.get_sympy_expression(verbose = False)[0]\n",
    "            if df_dict is None:\n",
    "                print(\"{0} is not a symbolic net!\".format(key))\n",
    "                continue\n",
    "            # Canonicalize (each expression is already in tree-form in sympy), so only have to generate the skeleton:\n",
    "            skeleton_list = []\n",
    "            param_tree_list = []\n",
    "            for expression in df_dict[\"numerical_expression\"]:\n",
    "                skeleton, param_tree = get_skeleton(expression)\n",
    "                skeleton_list.append(skeleton)\n",
    "                param_tree_list.append(param_tree)\n",
    "            df_dict[\"skeleton\"] = skeleton_list\n",
    "            df_dict[\"param_tree\"] = param_tree_list\n",
    "\n",
    "            # Assigning each structure of skeleton a different ID, for future calculation of the mode skeleton in a subset:\n",
    "            is_in = False\n",
    "            for skeleton_key, item in skeleton_dict.items():\n",
    "                if skeleton_list == item:\n",
    "                    df_dict[\"skeleton_id\"] = skeleton_key\n",
    "                    is_in = True\n",
    "                    break\n",
    "            if not is_in:\n",
    "                df_dict[\"skeleton_id\"] = len(skeleton_dict)\n",
    "                skeleton_dict[len(skeleton_dict)] = skeleton_list\n",
    "\n",
    "            # Other information:\n",
    "            df_dict[\"theory_name\"] = key\n",
    "            df_dict[\"mse_train\"], df_dict[\"mse_test\"] = theory.get_loss()\n",
    "            df_dict_list.append(df_dict)\n",
    "    # Collect all symbolic theories into a DataFrame, sorted by description length (DL), shortest first:\n",
    "    df = pd.DataFrame(df_dict_list).sort_values(by = \"DL\", ascending = True)\n",
    "    df = df.rename(columns = {\"DL\": \"pred_net_DL\"})\n",
    "\n",
    "    # Cluster the expressions according to DL:\n",
    "    DLs = np.expand_dims(df[\"pred_net_DL\"].values, 1)\n",
    "    kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(DLs)\n",
    "    df[\"cluster_id\"] = kmeans.labels_\n",
    "    df_sympy = df.reset_index()[[\"theory_name\", \"pred_net_DL\", \"cluster_id\", \"numerical_expression\", \"symbolic_expression\", \"skeleton\", \"param_tree\", \"skeleton_id\", \"mse_train\", \"mse_test\", \"param_dict\"]]\n",
    "\n",
    "    exprs_unified_list = []\n",
    "    for cluster_id in range(num_clusters):\n",
    "        # Find the mode of skeleton for each cluster:\n",
    "        df_cluster = df[df[\"cluster_id\"] == cluster_id]\n",
    "        skeleton_mode = df_cluster[\"skeleton_id\"].mode()\n",
    "        if len(skeleton_mode) > 1:\n",
    "            print(\"there are more than one modes in cluster {}\".format(cluster_id))\n",
    "        # Keep only the theories in this cluster whose skeleton matches the (first) modal skeleton:\n",
    "        df_cluster_mode = df_cluster[df_cluster[\"skeleton_id\"] == skeleton_mode[0]]\n",
    "        param_tree_list = list(df_cluster_mode[\"param_tree\"].values)\n",
    "        # Jointly traverse all the numerical expressions with the same skeleton, replacing different\n",
    "        # numerical values with a new parameter:\n",
    "        param_unification, param_list = joint_traverse(param_tree_list,\n",
    "                                                       param_list=[],\n",
    "                                                       fraction_threshold=fraction_threshold,\n",
    "                                                       relative_diff_threshold=relative_diff_threshold,\n",
    "                                                      )\n",
    "        exprs_unified = []\n",
    "        # Re-assemble each output-component expression by filling the shared skeleton with the unified params:\n",
    "        for k in range(len(df_cluster_mode.iloc[0][\"skeleton\"])):\n",
    "            is_valid, expr = assign_param_to_skeleton(df_cluster_mode.iloc[0][\"skeleton\"][k], param_unification[k])\n",
    "            assert is_valid, \"The unification is not valid. Check code!\"\n",
    "            exprs_unified.append(expr)\n",
    "        exprs_unified_list.append(exprs_unified)\n",
    "\n",
    "    if verbose:\n",
    "        # Printing:\n",
    "        print(\"Unified prediction functions:\")\n",
    "        pp.pprint(exprs_unified_list)\n",
    "        print(\"\\nTable of symbolic prediction functions:\")\n",
    "        # display() is the IPython rich-display builtin (available inside notebooks):\n",
    "        display(df)\n",
    "    return df_sympy, exprs_unified_list\n",
    "\n",
    "\n",
    "\n",
    "def get_skeleton(expr):\n",
    "    \"\"\"Recursively obtain the skeleton and corresponding parameter-tree (in terms of lists of lists) of a symbolic expression, \n",
    "       where the skeleton replaces all numerical values by a symbol 'p', and the param_tree records the corresponding numerical parameter.\n",
    "       Returns (skeleton, param_tree), with param_tree reordered so that it aligns with skeleton.args.\n",
    "    \"\"\"\n",
    "    args = []\n",
    "    param_tree = []\n",
    "    for arg in expr.args:\n",
    "        if isinstance(arg, sympy.numbers.Float) or isinstance(arg, sympy.numbers.Integer):\n",
    "            # Numerical leaf: replace by the placeholder symbol 'p' and remember its value.\n",
    "            arg_store = Symbol('p')\n",
    "            param_value = arg\n",
    "        elif isinstance(arg, sympy.symbol.Symbol):\n",
    "            # Variable leaf: kept verbatim; no parameter at this slot.\n",
    "            arg_store = arg\n",
    "            param_value = None\n",
    "        else:\n",
    "            # Recursion:\n",
    "            arg_store, param_value = get_skeleton(arg)\n",
    "        args.append(arg_store)\n",
    "        param_tree.append(param_value)\n",
    "    skeleton = expr.func(*args)\n",
    "    \n",
    "    # expr.func(*args) lets sympy re-canonicalize the argument order, so the param_tree recorded in\n",
    "    # traversal order may no longer align with skeleton.args.\n",
    "    # NOTE(review): this search is factorial in the number of args — acceptable for small expressions.\n",
    "    # From all the permutations on the branch of param_tree, find the one that has the same order as the skeleton:\n",
    "    is_match = False\n",
    "    for param_tree_permute in itertools.permutations(deepcopy(param_tree)):\n",
    "        is_valid, assigned_expr = assign_param_to_skeleton(skeleton, param_tree_permute)\n",
    "        if is_valid and assigned_expr == expr:\n",
    "            is_match = True\n",
    "            break\n",
    "    if not is_match:\n",
    "        raise Exception(\"Matching param_tree is not found!\")\n",
    "    param_tree = list(param_tree_permute)\n",
    "    return skeleton, param_tree\n",
    "\n",
    "\n",
    "def joint_traverse(exprs, param_list, fraction_threshold=1, relative_diff_threshold=0, pivot_type=\"mode\"):\n",
    "    \"\"\"Recursively jointly traverse all the exprs with the same skeleton, replacing different numerical values with\n",
    "       a unification parameter \"p{}\".\n",
    "\n",
    "    Args:\n",
    "        exprs: list of param_trees (nested lists whose leaves are None, sympy numbers, or sympy symbols),\n",
    "            all sharing the same skeleton.\n",
    "        param_list: running list of unification parameters created so far (mutated and returned).\n",
    "        fraction_threshold, relative_diff_threshold, pivot_type: forwarded to check_same_number().\n",
    "\n",
    "    Returns:\n",
    "        (param_unification, param_list): the unified param_tree and the updated parameter list.\n",
    "    \"\"\"\n",
    "    first = exprs[0]\n",
    "\n",
    "    # Base case: an empty slot of the param_tree stays empty.\n",
    "    if first is None:\n",
    "        return first, param_list\n",
    "\n",
    "    # Base case: a numerical leaf.\n",
    "    if isinstance(first, sympy.numbers.Float) or isinstance(first, sympy.numbers.Integer):\n",
    "        if check_same_number(exprs,\n",
    "                             fraction_threshold=fraction_threshold,\n",
    "                             relative_diff_threshold=relative_diff_threshold,\n",
    "                             pivot_type=pivot_type,\n",
    "                            ):\n",
    "            # All theories (essentially) agree on this constant, so keep it as-is:\n",
    "            return first, param_list\n",
    "        # The constants disagree: introduce a fresh unification parameter p0, p1, ...\n",
    "        new_param = Symbol('p{}'.format(len(param_list)))\n",
    "        param_list.append(new_param)\n",
    "        return new_param, param_list\n",
    "\n",
    "    # Base case: a symbol leaf is kept verbatim (it should coincide across all exprs).\n",
    "    if isinstance(first, sympy.symbol.Symbol):\n",
    "        if not check_same_exact(exprs):\n",
    "            print(\"The symbols at the same position are not the same!\")\n",
    "        return first, param_list\n",
    "\n",
    "    # Recursive case: group the i-th children of every expr and traverse each group jointly.\n",
    "    num_args = len(first)\n",
    "    param_uni_list = []\n",
    "    for i in range(num_args):\n",
    "        children_at_i = [args[i] for args in exprs]\n",
    "        param_uni, param_list = joint_traverse(children_at_i,\n",
    "                                               param_list,\n",
    "                                               fraction_threshold=fraction_threshold,\n",
    "                                               relative_diff_threshold=relative_diff_threshold,\n",
    "                                               pivot_type=pivot_type,\n",
    "                                              )\n",
    "        param_uni_list.append(param_uni)\n",
    "    return param_uni_list, param_list\n",
    "\n",
    "\n",
    "def assign_param_to_skeleton(skeleton, param_tree):\n",
    "    \"\"\"Recursively assign the parameters in param_tree to the 'p' slots of skeleton.\n",
    "\n",
    "    Returns:\n",
    "        (is_valid, expr): is_valid is False when param_tree does not fit the skeleton's structure;\n",
    "        expr is the skeleton with its 'p' slots filled in (or the unmodified sub-skeleton where\n",
    "        the assignment failed).\n",
    "    \"\"\"\n",
    "    from numbers import Number\n",
    "    if isinstance(skeleton, sympy.symbol.Symbol):\n",
    "        if skeleton == Symbol(\"p\"):\n",
    "            # A 'p' slot accepts a plain numerical value...\n",
    "            if isinstance(param_tree, (sympy.numbers.Float, sympy.numbers.Integer, Number)):\n",
    "                return True, param_tree\n",
    "            # ...or a named unification parameter such as 'p0', 'p1':\n",
    "            if isinstance(param_tree, sympy.symbol.Symbol) and param_tree.name.startswith(\"p\"):\n",
    "                return True, param_tree\n",
    "            # Anything else cannot fill a 'p' slot:\n",
    "            return False, skeleton\n",
    "        # A non-'p' symbol carries no parameter, so it is only valid with an empty slot:\n",
    "        return param_tree is None, skeleton\n",
    "    if isinstance(skeleton, (sympy.numbers.Float, sympy.numbers.Integer)):\n",
    "        raise Exception(\"Skeleton cannot have numerical parameters!\")\n",
    "    # Composite node: the param_tree must be a sequence matching the skeleton's children one-to-one.\n",
    "    if not isinstance(param_tree, (list, tuple)):\n",
    "        return False, skeleton\n",
    "    if len(skeleton.args) != len(param_tree):\n",
    "        return False, skeleton\n",
    "    is_valid = True\n",
    "    assigned_args = []\n",
    "    for sub_skeleton, param in zip(skeleton.args, param_tree):\n",
    "        sub_valid, sub_assigned = assign_param_to_skeleton(sub_skeleton, param)\n",
    "        is_valid = is_valid and sub_valid\n",
    "        assigned_args.append(sub_assigned)\n",
    "    return is_valid, skeleton.func(*assigned_args)\n",
    "\n",
    "\n",
    "def check_same_exact(args):\n",
    "    \"\"\"Return True iff every expression in args equals the first one (exact comparison of numbers/symbols).\"\"\"\n",
    "    reference = args[0]\n",
    "    return all(arg == reference for arg in args)\n",
    "\n",
    "\n",
    "def check_same_number(args, fraction_threshold=1, relative_diff_threshold=0, pivot_type=\"mode\"):\n",
    "    \"\"\"Check if all the expressions in args have (approximately) the same numerical value.\n",
    "\n",
    "    A pivot is computed from the numbers (their mode or mean); the numbers whose relative\n",
    "    difference from the pivot is within relative_diff_threshold are counted, and the numbers\n",
    "    are considered 'the same' if that count is at least fraction_threshold of all numbers.\n",
    "    The function with default parameters has the same behavior as check_same_exact().\n",
    "\n",
    "    Args:\n",
    "        args: list of sympy Floats/Integers (plain Python numbers also work).\n",
    "        fraction_threshold: required fraction of numbers close to the pivot (default 1, i.e. all).\n",
    "        relative_diff_threshold: allowed relative difference from the pivot (default 0, i.e. exact).\n",
    "        pivot_type: 'mode' or 'mean'.\n",
    "\n",
    "    Returns:\n",
    "        bool: whether the numbers are considered the same.\n",
    "\n",
    "    Raises:\n",
    "        Exception: if pivot_type is not recognized.\n",
    "    \"\"\"\n",
    "    # Convert sympy numbers into plain Python numbers so numpy can operate on them:\n",
    "    args_np = []\n",
    "    for arg in args:\n",
    "        if isinstance(arg, sympy.numbers.Float):\n",
    "            args_np.append(float(arg))\n",
    "        elif isinstance(arg, sympy.numbers.Integer):\n",
    "            args_np.append(int(arg))\n",
    "        else:\n",
    "            args_np.append(arg)\n",
    "    args_np = np.array(args_np)\n",
    "    # Compute the pivot from the CONVERTED numbers (the raw sympy objects would break numpy/scipy):\n",
    "    if pivot_type == \"mean\":\n",
    "        pivot = np.mean(args_np)\n",
    "    elif pivot_type == \"mode\":\n",
    "        # Mode via np.unique: ties break toward the smallest value (like scipy.stats.mode), and this\n",
    "        # avoids relying on the scipy.stats submodule, which a bare 'import scipy' does not load.\n",
    "        values, counts = np.unique(args_np, return_counts=True)\n",
    "        pivot = values[np.argmax(counts)]\n",
    "    else:\n",
    "        raise Exception(\"pivot_type {0} not recognized!\".format(pivot_type))\n",
    "\n",
    "    # Compare |x - pivot| <= relative_diff_threshold * |pivot| instead of dividing by pivot, so that\n",
    "    # a pivot of exactly 0 does not produce nan/inf (exact matches then still count as 'same'):\n",
    "    count = (np.abs(args_np - pivot) <= relative_diff_threshold * np.abs(pivot)).sum()\n",
    "    is_same = bool(count / len(args) >= fraction_threshold)\n",
    "    return is_same"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Classes for theory, master_theory and theory_hub:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Theory_Tuple(object):\n",
    "    \"\"\"A theory_tuple contains the individual prediction function (pred_net, either in neural network or symbolic format), \n",
    "       its corresponding domain_net (a subclassifier that only takes one of the logit of the full domain classifier),\n",
    "       and the dataset it is based on.\n",
    "    \"\"\"\n",
    "    def __init__(self, pred_net, domain_net, dataset, is_Lagrangian = False, is_cuda = False):\n",
    "        # pred_net/domain_net may be passed either as MLP instances or as serialized model_dicts:\n",
    "        if isinstance(pred_net, dict):\n",
    "            pred_net = load_model_dict_net(pred_net, is_cuda = is_cuda)\n",
    "        if isinstance(domain_net, dict):\n",
    "            domain_net = load_model_dict_net(domain_net, is_cuda = is_cuda)\n",
    "        assert pred_net.__class__.__name__ == \"MLP\"\n",
    "        assert domain_net.__class__.__name__ == \"MLP\"\n",
    "        self.pred_net = pred_net\n",
    "        self.domain_net = domain_net\n",
    "        # dataset is expected to unpack as (((X_train, y_train), (X_test, y_test), _), _) — see get_loss():\n",
    "        self.dataset = dataset\n",
    "        self.is_Lagrangian = is_Lagrangian\n",
    "        self.is_cuda = is_cuda\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serializable dict representation of this theory_tuple.\"\"\"\n",
    "        model_dict = {\"type\": \"Theory_Tuple\"}\n",
    "        model_dict[\"pred_net\"] = deepcopy(self.pred_net.model_dict)\n",
    "        model_dict[\"domain_net\"] = deepcopy(self.domain_net.model_dict)\n",
    "        model_dict[\"dataset\"] = self.dataset\n",
    "        model_dict[\"is_Lagrangian\"] = self.is_Lagrangian\n",
    "        return model_dict\n",
    "    \n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Restore this object's state from a serialized model_dict.\"\"\"\n",
    "        new_theory_tuple = load_model_dict_theory_tuple(model_dict, is_cuda = self.is_cuda)\n",
    "        self.__dict__.update(new_theory_tuple.__dict__)\n",
    "    \n",
    "    def get_loss(self):\n",
    "        \"\"\"Return (mse_train, mse_test) of pred_net on this tuple's dataset; a split whose X tensor is\n",
    "        0-dimensional is treated as empty and yields None for that split.\"\"\"\n",
    "        ((X_train, y_train), (X_test, y_test), _), _ = self.dataset\n",
    "        if len(X_train.size()) == 0:\n",
    "            mse_train = None\n",
    "        else:\n",
    "            if self.is_Lagrangian:\n",
    "                # Lagrangian theories route predictions through get_Lagrangian_loss (from theory_learning.models):\n",
    "                mse_train = to_np_array(get_criterion(\"mse\")(get_Lagrangian_loss(self.pred_net, X_train), y_train))\n",
    "            else:\n",
    "                mse_train = to_np_array(get_criterion(\"mse\")(self.pred_net(X_train), y_train))\n",
    "        if len(X_test.size()) == 0:\n",
    "            mse_test = None\n",
    "        else:\n",
    "            if self.is_Lagrangian:\n",
    "                mse_test = to_np_array(get_criterion(\"mse\")(get_Lagrangian_loss(self.pred_net, X_test), y_test))\n",
    "            else:\n",
    "                mse_test = to_np_array(get_criterion(\"mse\")(self.pred_net(X_test), y_test))\n",
    "        return mse_train, mse_test\n",
    "    \n",
    "    def plot(self, is_train = True, **kwargs):\n",
    "        \"\"\"Plot pred_net's predictions on the train (or test) inputs via plot3D, plus per-domain plots.\"\"\"\n",
    "        ((X_train, y_train), (X_test, y_test), _), _ = self.dataset\n",
    "        X = X_train if is_train else X_test\n",
    "        pred = self.pred_net(X)\n",
    "        plot3D(X, pred, figsize = (6,6))\n",
    "        # All points get domain id 0 here, since this tuple holds a single theory:\n",
    "        plot_indi_domain(X, torch.zeros(len(X)).long(), images_per_row = 2, row_width = 12, row_height = 5, **kwargs)\n",
    "\n",
    "    def simplify(self, mode, **kwargs):\n",
    "        \"\"\"Simplify pred_net using the training split; delegates to pred_net.simplify.\"\"\"\n",
    "        ((X_train, y_train), _, _), _ = self.dataset\n",
    "        self.pred_net.simplify(X_train, y_train, mode = mode, **kwargs)\n",
    "\n",
    "\n",
    "class Master_Theory_Tuple(object):\n",
    "    \"\"\"A master_theory_tuple bundles a master theory with the theory_tuples it was built from.\"\"\"\n",
    "    def __init__(self, master_theory, theory_tuples, is_cuda = False):\n",
    "        # theory_tuples must be a dict mapping name -> theory_tuple:\n",
    "        assert isinstance(theory_tuples, dict)\n",
    "        self.master_theory = master_theory\n",
    "        self.theory_tuples = theory_tuples\n",
    "        self.is_cuda = is_cuda\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serializable dict representation of this master_theory_tuple.\"\"\"\n",
    "        tuple_dicts = {name: theory_tuple.model_dict for name, theory_tuple in self.theory_tuples.items()}\n",
    "        return {\n",
    "            \"type\": \"Master_Theory_Tuple\",\n",
    "            \"master_theory\": deepcopy(self.master_theory.model_dict),\n",
    "            \"theory_tuples\": deepcopy(tuple_dicts),\n",
    "        }\n",
    "\n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Restore this object's state from a serialized model_dict.\"\"\"\n",
    "        loaded = load_model_dict_master_theory_tuple(model_dict, is_cuda = self.is_cuda)\n",
    "        self.__dict__.update(loaded.__dict__)\n",
    "\n",
    "\n",
    "\n",
    "class Master_Theory(nn.Module):\n",
    "    \"\"\"Master_theory can generate a continuum of theories.\n",
    "\n",
    "    Components:\n",
    "      - statistics_Net: encodes a dataset (X, y) into a latent_param vector (see calculate_latent_param);\n",
    "      - classifier: an MLP over X, used to select the points this master theory applies to;\n",
    "      - master_model (created later by propose_master_model): the master prediction function,\n",
    "        conditioned on latent_param, from which individual theory models are instantiated.\n",
    "    \"\"\"\n",
    "    def __init__(\n",
    "        self,\n",
    "        input_size,\n",
    "        output_size,\n",
    "        pre_pooling_neurons,\n",
    "        struct_param_statistics_Net,\n",
    "        struct_param_classifier,\n",
    "        pooling = \"max\",\n",
    "        settings_statistics_Net = {\"activation\": \"leakyRelu\"},\n",
    "        settings_classifier = {\"activation\": \"leakyRelu\"},\n",
    "        is_cuda = False,\n",
    "        ):\n",
    "        # NOTE(review): the settings_* defaults are mutable dicts shared across instances — safe only\n",
    "        # as long as they are never mutated in place.\n",
    "        super(Master_Theory, self).__init__()\n",
    "        self.input_size = input_size\n",
    "        self.output_size = output_size\n",
    "        # statistics_Net consumes concatenated [X, y] rows, hence input_size + output_size:\n",
    "        self.statistics_Net = Statistics_Net(input_size = input_size + output_size,\n",
    "                                             pre_pooling_neurons = pre_pooling_neurons,\n",
    "                                             struct_param_pre = struct_param_statistics_Net[0],\n",
    "                                             struct_param_post = struct_param_statistics_Net[1],\n",
    "                                             pooling = pooling,\n",
    "                                             settings = settings_statistics_Net,\n",
    "                                             is_cuda = is_cuda,\n",
    "                                            )\n",
    "        self.classifier = MLP(input_size = input_size,\n",
    "                              struct_param = struct_param_classifier,\n",
    "                              settings = settings_classifier,\n",
    "                              is_cuda = is_cuda,\n",
    "                             )\n",
    "        self.latent_param = None\n",
    "        self.is_cuda = is_cuda\n",
    "        if self.is_cuda:\n",
    "            self.cuda()\n",
    "\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serializable dict representation; master_model entries are None until propose_master_model() is called.\"\"\"\n",
    "        model_dict = {\"type\": \"Master_Theory\"}\n",
    "        model_dict[\"input_size\"] = self.input_size\n",
    "        model_dict[\"master_model\"] = self.master_model.model_dict if hasattr(self, \"master_model\") else None\n",
    "        model_dict[\"master_model_type\"] = self.master_model_type if hasattr(self, \"master_model_type\") else None\n",
    "        model_dict[\"statistics_Net\"] = self.statistics_Net.model_dict\n",
    "        model_dict[\"classifier\"] = self.classifier.model_dict\n",
    "        model_dict[\"latent_param\"] = self.latent_param\n",
    "        return model_dict\n",
    "\n",
    "\n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Restore this object's state from a serialized model_dict.\"\"\"\n",
    "        new_master_theory = load_model_dict_master_theory(model_dict, is_cuda = self.is_cuda)\n",
    "        self.__dict__.update(new_master_theory.__dict__)\n",
    "\n",
    "\n",
    "    def forward(self, input, latent_param = None, X = None, y = None):\n",
    "        \"\"\"Run the master model on input. The latent_param is taken, in order of precedence, from\n",
    "        (X, y) via statistics_Net, from the explicit latent_param argument, or from self.latent_param.\"\"\"\n",
    "        if X is not None or y is not None:\n",
    "            assert latent_param is None\n",
    "            latent_param = self.calculate_latent_param(X, y)\n",
    "        elif latent_param is None:\n",
    "            latent_param = self.latent_param\n",
    "        if self.master_model_type == \"symbolic\":\n",
    "            # Map the latent vector onto the named parameters of the (single) symbolic layer:\n",
    "            latent_param = {0: {param_name: latent_param[0, k] for k, param_name in enumerate(self.param_name_list)}}\n",
    "        elif self.master_model_type == \"regulated-Net\":\n",
    "            # NOTE(review): get_regulated_latent_param is expected to be defined elsewhere in this module/notebook.\n",
    "            latent_param = get_regulated_latent_param(self.master_model, latent_param)\n",
    "        return self.master_model(input, latent_param)\n",
    "\n",
    "\n",
    "    def get_regularization(self, targets, source = [\"weight\", \"bias\"], mode = \"L1\"):\n",
    "        \"\"\"Sum the regularization terms (of the given mode, e.g. 'L1') of the chosen target sub-networks.\"\"\"\n",
    "        reg = Variable(torch.FloatTensor(np.array([0])))\n",
    "        if self.is_cuda:\n",
    "            reg = reg.cuda()\n",
    "        for target in targets:\n",
    "            if target == \"master_model\":\n",
    "                reg = reg + self.master_model.get_regularization(source = source, mode = mode)\n",
    "            elif target == \"statistics_Net\":\n",
    "                reg = reg + self.statistics_Net.get_regularization(source = source, mode = mode)\n",
    "            elif target == \"classifier\":\n",
    "                reg = reg + self.classifier.get_regularization(source = source, mode = mode)\n",
    "            else:\n",
    "                raise Exception(\"target {0} not recognized!\".format(target))\n",
    "        return reg\n",
    "\n",
    "\n",
    "    def propose_master_model(\n",
    "        self,\n",
    "        theory_collection,\n",
    "        input_size,\n",
    "        statistics_output_neurons,\n",
    "        master_model_type=\"regulated-Net\",\n",
    "        symbolic_expression=None,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Propose the master model (the master prediction function that unifies multiple prediction functions in theories)\n",
    "        based on a collection of theories.\n",
    "        \"\"\"\n",
    "        self.master_model_type = master_model_type\n",
    "        \n",
    "        if master_model_type == \"regulated-Net\":\n",
    "            # A plain MLP; at forward() time the latent_param is adapted to it via get_regulated_latent_param():\n",
    "            struct_param_regulated_Net = kwargs[\"struct_param_regulated_Net\"] if \"struct_param_regulated_Net\" in kwargs else [\n",
    "                    [8, \"Simple_Layer\", {}],\n",
    "                    [8, \"Simple_Layer\", {}],\n",
    "                    [self.output_size, \"Simple_Layer\", {\"activation\": \"linear\"}],\n",
    "            ]\n",
    "            # The latent vector must supply one (or two) values per layer of the regulated net:\n",
    "            assert len(struct_param_regulated_Net) * 2 == statistics_output_neurons or len(struct_param_regulated_Net) == statistics_output_neurons\n",
    "            activation_regulated_Net = kwargs[\"activation_regulated_Net\"] if \"activation_regulated_Net\" in kwargs else \"leakyRelu\"\n",
    "            self.master_model = MLP(input_size = input_size,\n",
    "                                    struct_param = struct_param_regulated_Net,\n",
    "                                    settings = {\"activation\": activation_regulated_Net},\n",
    "                                    is_cuda = self.is_cuda,\n",
    "                                   )            \n",
    "        elif master_model_type == \"symbolic\":\n",
    "            # A single Symbolic_Layer built from the given expression; its free parameter names are\n",
    "            # recorded so forward() can bind the latent vector to them:\n",
    "            model_dict = {\n",
    "                \"type\": \"MLP\",\n",
    "                \"input_size\": input_size,\n",
    "                \"struct_param\": [[len(symbolic_expression), \"Symbolic_Layer\", {\"symbolic_expression\": str(symbolic_expression)}]]\n",
    "            }\n",
    "            self.param_name_list = get_param_name_list(symbolic_expression)\n",
    "            self.master_model = load_model_dict(model_dict, is_cuda=self.is_cuda)\n",
    "        elif master_model_type == \"generative_Net\":\n",
    "            # A Generative_Net that generates the weights and biases of a 3-layer model from the latent vector:\n",
    "            struct_param_gen_base = kwargs[\"struct_param_gen_base\"] if \"struct_param_gen_base\" in kwargs else [\n",
    "                    [60, \"Simple_Layer\", {}],\n",
    "                    [60, \"Simple_Layer\", {}],\n",
    "                    [60, \"Simple_Layer\", {}],\n",
    "            ]\n",
    "            activation_generative = kwargs[\"activation_generative\"] if \"activation_generative\" in kwargs else \"leakyRelu\"\n",
    "            activation_model = kwargs[\"activation_model\"] if \"activation_model\" in kwargs else \"leakyRelu\"\n",
    "            num_context_neurons = kwargs[\"num_context_neurons\"] if \"num_context_neurons\" in kwargs else 0\n",
    "            layer_type = \"Simple_Layer\"\n",
    "\n",
    "            struct_param_weight1 = struct_param_gen_base + [[(input_size, 20), layer_type, {\"activation\": \"linear\"}]]\n",
    "            struct_param_weight2 = struct_param_gen_base + [[(20, 20), layer_type, {\"activation\": \"linear\"}]]\n",
    "            struct_param_weight3 = struct_param_gen_base + [[(20, self.output_size), layer_type, {\"activation\": \"linear\"}]]\n",
    "            struct_param_bias1 = struct_param_gen_base + [[20, layer_type, {\"activation\": \"linear\"}]]\n",
    "            struct_param_bias2 = struct_param_gen_base + [[20, layer_type, {\"activation\": \"linear\"}]]\n",
    "            struct_param_bias3 = struct_param_gen_base + [[self.output_size, layer_type,  {\"activation\": \"linear\"}]]\n",
    "\n",
    "            self.master_model = Generative_Net(input_size = statistics_output_neurons,\n",
    "                                                num_context_neurons = num_context_neurons,\n",
    "                                                W_struct_param_list = [struct_param_weight1, struct_param_weight2, struct_param_weight3],\n",
    "                                                b_struct_param_list = [struct_param_bias1, struct_param_bias2, struct_param_bias3],\n",
    "                                                settings_generative = {\"activation\": activation_generative},\n",
    "                                                settings_model = {\"activation\": activation_model},\n",
    "                                                learnable_latent_param = False,\n",
    "                                                is_cuda = self.is_cuda,\n",
    "                                               )\n",
    "        else:\n",
    "            raise Exception(\"mode {0} not recognized!\".format(master_model_type))\n",
    "        return self.master_model\n",
    "    \n",
    "\n",
    "    def get_parameters(self, targets):\n",
    "        \"\"\"Return a single iterator over the parameters of the chosen target sub-networks.\"\"\"\n",
    "        params = []\n",
    "        for target in targets:\n",
    "            if target == \"master_model\":\n",
    "                params.append(self.master_model.parameters())\n",
    "            elif target == \"statistics_Net\":\n",
    "                params.append(self.statistics_Net.parameters())\n",
    "            elif target == \"classifier\":\n",
    "                params.append(self.classifier.parameters())\n",
    "            elif target == \"latent_param\":\n",
    "                # latent_param is only included once it has been set:\n",
    "                if self.latent_param is not None:\n",
    "                    params.append([self.latent_param])\n",
    "            else:\n",
    "                raise Exception(\"target {0} not recognized!\".format(target))\n",
    "        return itertools.chain(*params)\n",
    "\n",
    "\n",
    "    def set_latent_param(self, latent_param):\n",
    "        \"\"\"Store latent_param as this module's learnable latent parameter, copying in place if one already exists.\"\"\"\n",
    "        assert isinstance(latent_param, Variable), \"The latent_param must be a Variable!\"\n",
    "        if self.latent_param is not None:\n",
    "            self.latent_param.data.copy_(latent_param.data)\n",
    "        else:\n",
    "            self.latent_param = nn.Parameter(latent_param.data)\n",
    "\n",
    "\n",
    "    def calculate_latent_param(self, X, y):\n",
    "        \"\"\"Encode the dataset (X, y) into a latent parameter via statistics_Net (rows are [X, y] concatenations).\"\"\"\n",
    "        return self.statistics_Net(torch.cat([X, y], 1))\n",
    "\n",
    "\n",
    "    def propose_theory_model_from_latent_param(self, latent_param):\n",
    "        \"\"\"Instantiate a concrete theory model: copy master_model and initialize it with the given latent_param.\"\"\"\n",
    "        if self.master_model_type == \"symbolic\":\n",
    "            latent_param = {0: {param_name: latent_param[0, k] for k, param_name in enumerate(self.param_name_list)}}\n",
    "        elif self.master_model_type == \"regulated-Net\":\n",
    "            latent_param = get_regulated_latent_param(self.master_model, latent_param)\n",
    "        theory_model = deepcopy(self.master_model)\n",
    "        theory_model.init_with_p_dict(latent_param)\n",
    "        return theory_model\n",
    "\n",
    "\n",
    "    def propose_theory_model_from_data(self, X, y):\n",
    "        \"\"\"Propose a theory model for (X, y): sweep classifier thresholds from high to low, encode up to\n",
    "        three sufficiently large subsets of selected points into latent params, and initialize a model\n",
    "        from their mean. Returns None if no threshold selects enough points.\"\"\"\n",
    "        # Select data and predict the latent variable:\n",
    "        latent_param_list = []\n",
    "        thresholds = np.linspace(0.99, 0.5, 99)\n",
    "        # A subset only counts when it has more than max(0.5% of the data, 100) points:\n",
    "        minimum_positive = max(len(y) * 0.005, 100)\n",
    "        count = 0\n",
    "        for threshold in thresholds:\n",
    "            u_pred = (nn.Softmax(dim = 1)(self.classifier(X))[:, 1] > threshold).long()\n",
    "            if to_np_array(u_pred.sum()) > minimum_positive:\n",
    "                # NOTE(review): .byte() masks are deprecated in newer PyTorch (use .bool()) — kept as-is here.\n",
    "                X_chosen = torch.masked_select(X, u_pred.byte().unsqueeze(1).detach()).view(-1, X.size(1))\n",
    "                y_chosen = torch.masked_select(y, u_pred.byte().unsqueeze(1).detach()).view(-1, y.size(1))\n",
    "                latent_param = self.calculate_latent_param(X_chosen, y_chosen)\n",
    "                latent_param_list.append(latent_param)\n",
    "                count += 1\n",
    "                if count > 2:\n",
    "                    break\n",
    "        if count > 0:\n",
    "            latent_param_mean = torch.stack(latent_param_list, -1).mean(-1)\n",
    "\n",
    "            # Propose theory_model:\n",
    "            theory_model = self.propose_theory_model_from_latent_param(latent_param_mean)\n",
    "            return theory_model\n",
    "        else:\n",
    "            return None\n",
    "\n",
    "\n",
    "class Theory_Hub(object):\n",
    "    \"\"\"The theory_hub stores theories and master theories, and contains methods for symbolic unification,\n",
    "    adding theories to hub, proposing new theories based on new data, and propose network master theories \n",
    "    based on a collection of theories.\n",
    "    \"\"\"\n",
    "    def __init__(self, is_cuda = False):\n",
    "        \"\"\"Create an empty hub with no theories or master theories.\"\"\"\n",
    "        self.is_cuda = is_cuda\n",
    "        self.theory_collection = OrderedDict()\n",
    "        self.master_theory_collection = OrderedDict()\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serializable dictionary representation of the hub and all stored (master) theory tuples.\"\"\"\n",
    "        theory_dicts = OrderedDict((name, tup.model_dict) for name, tup in self.theory_collection.items())\n",
    "        master_dicts = OrderedDict((name, tup.model_dict) for name, tup in self.master_theory_collection.items())\n",
    "        return {\"type\": \"Theory_Hub\", \"theory_collection\": theory_dicts, \"master_theory_collection\": master_dicts}\n",
    "\n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Rebuild the hub from a serialized model_dict, replacing this instance's state in place.\"\"\"\n",
    "        new_theory_hub = load_model_dict_theory_hub(model_dict, is_cuda = self.is_cuda)\n",
    "        # Adopt the freshly-loaded hub's attributes so existing references to self stay valid.\n",
    "        self.__dict__.update(new_theory_hub.__dict__)\n",
    "\n",
    "    @property\n",
    "    def theory(self):\n",
    "        \"\"\"Alias for the theory_collection OrderedDict.\"\"\"\n",
    "        return self.theory_collection\n",
    "\n",
    "    @property\n",
    "    def master_theory(self):\n",
    "        \"\"\"Alias for the master_theory_collection OrderedDict.\"\"\"\n",
    "        return self.master_theory_collection\n",
    "\n",
    "\n",
    "    def add_theories(\n",
    "        self,\n",
    "        name, \n",
    "        pred_nets,\n",
    "        domain_net,\n",
    "        dataset,\n",
    "        verbose = True,\n",
    "        threshold = 1e-3,\n",
    "        is_Lagrangian = False,\n",
    "        ):\n",
    "        \"\"\"Split (pred_nets, domain_net, dataset) into one theory per domain and add each theory whose\n",
    "        train and test MSE are both below threshold. Returns {theory_name: {mse_train, mse_test}} for\n",
    "        the theories actually added (empty on a name collision).\n",
    "        \"\"\"\n",
    "        if \"{0}_0\".format(name) in self.theory_collection:\n",
    "            print(\"Name {0} collision! Please change to a different name!\".format(\"{0}_0\".format(name)))\n",
    "            added_theory_info = {}\n",
    "        else:\n",
    "            added_theory_info = {}\n",
    "            # Standardize the domain net, converting a single MLP into a model ensemble if needed:\n",
    "            if domain_net.__class__.__name__ == \"MLP\":\n",
    "                domain_net = domain_net.split_to_model_ensemble(mode = \"standardize\")\n",
    "            else:\n",
    "                domain_net.standardize(mode = \"b_mean_zero\")\n",
    "            num_models = domain_net.num_models\n",
    "\n",
    "            assert pred_nets.num_models == num_models, \"pred_nets must have the same num_models as the domain_net!\"\n",
    "            ((X_train, y_train), (X_test, y_test), (reflect_train, reflect_test)), info = dataset\n",
    "            # One-hot masks assigning each example to the domain the domain_net scores highest:\n",
    "            valid_train = to_one_hot(domain_net(X_train).max(1)[1], num_models).byte()\n",
    "            valid_test = to_one_hot(domain_net(X_test).max(1)[1], num_models).byte()\n",
    "            # NOTE(review): dataset_all appears unused below.\n",
    "            dataset_all = {}\n",
    "\n",
    "            for i in range(num_models):\n",
    "                theory_name = \"{0}_{1}\".format(name, i)\n",
    "                # Restrict the dataset to the examples assigned to domain i:\n",
    "                X_train_split = torch.masked_select(X_train, valid_train[:,i:i+1]).view(-1, X_train.size(1))\n",
    "                y_train_split = torch.masked_select(y_train, valid_train[:,i:i+1]).view(-1, y_train.size(1))\n",
    "                if reflect_train is not None:\n",
    "                    reflect_train_split = torch.masked_select(reflect_train, valid_train[:,i])\n",
    "                else:\n",
    "                    reflect_train_split = None\n",
    "\n",
    "                X_test_split = torch.masked_select(X_test, valid_test[:,i:i+1]).view(-1, X_test.size(1))\n",
    "                y_test_split = torch.masked_select(y_test, valid_test[:,i:i+1]).view(-1, y_test.size(1))\n",
    "                if reflect_test is not None:\n",
    "                    reflect_test_split = torch.masked_select(reflect_test, valid_test[:,i])\n",
    "                else:\n",
    "                    reflect_test_split = None\n",
    "\n",
    "                dataset_split = (((X_train_split, y_train_split), (X_test_split, y_test_split), (reflect_train_split, reflect_test_split)), deepcopy(info))\n",
    "                theory_tuple = Theory_Tuple(pred_net = getattr(pred_nets, \"model_{0}\".format(i)),\n",
    "                                            domain_net = getattr(domain_net, \"model_{0}\".format(i)),\n",
    "                                            dataset = dataset_split,\n",
    "                                            is_Lagrangian = is_Lagrangian,\n",
    "                                            is_cuda = self.is_cuda,\n",
    "                                           )\n",
    "                mse_train, mse_test = theory_tuple.get_loss()\n",
    "                # Only keep the theory if its domain has data and it fits that data well enough:\n",
    "                if mse_train is None or mse_test is None:\n",
    "                    if verbose:\n",
    "                        print(\"theory {0} NOT added because its domain_net does not classify any data for the model.\".format(theory_name))\n",
    "                elif mse_train > threshold or mse_test > threshold:\n",
    "                    if verbose:\n",
    "                        print(\"theory {0} NOT added! mse_train = {1:.9}\\tmse_test = {2:.9}\".format(theory_name, mse_train, mse_test))\n",
    "                else:\n",
    "                    self.theory_collection[theory_name] = theory_tuple\n",
    "                    added_theory_info[theory_name] = {\"mse_train\": mse_train, \"mse_test\": mse_test}\n",
    "                    if verbose:\n",
    "                        print(\"theory {0} added! mse_train = {1:.9}\\tmse_test = {2:.9}\".format(theory_name, mse_train, mse_test))    \n",
    "        return added_theory_info\n",
    "\n",
    "\n",
    "    def add_theories_from_info_dict(\n",
    "        self,\n",
    "        name,\n",
    "        info_dict,\n",
    "        pred_nets_target = \"pred_nets_simplified\",\n",
    "        domain_net_target = \"domain_net_simplified_final\",\n",
    "        dataset_target = \"dataset\",\n",
    "        ):\n",
    "        \"\"\"Load pred_nets, domain_net and dataset from a saved info_dict and add the resulting theories.\n",
    "\n",
    "        Fix: the dataset is now looked up via dataset_target (it was previously hard-coded to\n",
    "        \"dataset\", silently ignoring the argument), and the added-theory info returned by\n",
    "        add_theories is propagated to the caller.\n",
    "        \"\"\"\n",
    "        pred_nets = load_model_dict_model_ensemble(info_dict[pred_nets_target], is_cuda = self.is_cuda)\n",
    "        domain_net = load_model_dict_net(info_dict[domain_net_target], is_cuda = self.is_cuda)\n",
    "        dataset = info_dict[dataset_target]\n",
    "        return self.add_theories(name, pred_nets, domain_net, dataset)\n",
    "    \n",
    "    \n",
    "    def add_master_theory_group_list(\n",
    "        self,\n",
    "        group_list,\n",
    "        is_replace = False,\n",
    "        ):\n",
    "        \"\"\"Add every (master_theory_dict, theory_dict) pair in group_list to the hub.\n",
    "        Each master_theory_dict must contain exactly one named master theory.\n",
    "        \"\"\"\n",
    "        for master_theory_dict, theory_dict in group_list:\n",
    "            assert len(master_theory_dict) == 1\n",
    "            (name, master_theory), = master_theory_dict.items()\n",
    "            self.add_master_theory(name, master_theory, theory_dict, is_replace = is_replace)\n",
    "\n",
    "\n",
    "    def add_master_theory(\n",
    "        self,\n",
    "        name,\n",
    "        master_theory,\n",
    "        theory_tuples,\n",
    "        is_replace = False,\n",
    "        ):\n",
    "        \"\"\"Store master_theory together with its associated theory_tuples under name.\n",
    "        Unless is_replace is True, an existing name is kept and a collision warning is printed.\n",
    "        \"\"\"\n",
    "        assert isinstance(theory_tuples, dict), \"theory_tuples must be a dictionary of theory_tuples!\"\n",
    "        if not is_replace and name in self.master_theory:\n",
    "            print(\"Name {0} collision! Please change to a different name!\".format(name))\n",
    "            return\n",
    "        self.master_theory_collection[name] = Master_Theory_Tuple(master_theory, theory_tuples, is_cuda = self.is_cuda)\n",
    "\n",
    "\n",
    "    def remove_theories(self, names = None, threshold = None, verbose = True):\n",
    "        \"\"\"Remove theories either by explicit name(s) or by an MSE threshold; exactly one of\n",
    "        names/threshold must be given. Returns an OrderedDict of the popped theory tuples.\n",
    "\n",
    "        Fix: names is only wrapped into a list when it is not None. Previously names = None became\n",
    "        [None], making the threshold branch unreachable (its assert fired instead). The bare\n",
    "        `raise` (which itself errors with no active exception) now raises an explicit Exception.\n",
    "        \"\"\"\n",
    "        popped_theories = OrderedDict()\n",
    "        if names is not None:\n",
    "            assert threshold is None\n",
    "            if not isinstance(names, list):\n",
    "                names = [names]\n",
    "            for name in names:\n",
    "                popped_theories[name] = self.theory_collection.pop(name)\n",
    "                if verbose:\n",
    "                    print(\"Theory {0} poped!\".format(name))\n",
    "        elif threshold is not None:\n",
    "            # Pop every theory whose train or test MSE exceeds the threshold:\n",
    "            mse_dict = self.get_loss()\n",
    "            for name, (mse_train, mse_test) in mse_dict.items():\n",
    "                if mse_train > threshold or mse_test > threshold:\n",
    "                    popped_theories[name] = self.theory_collection.pop(name)\n",
    "                    if verbose:\n",
    "                        print(\"Theory {0}'s mse_train = {1:.9f}, mse_test = {2:.9f}, larger than threshold, popped!\".format(name, mse_train, mse_test))\n",
    "        else:\n",
    "            raise Exception(\"Either names or threshold must be provided!\")\n",
    "        return popped_theories\n",
    "\n",
    "\n",
    "    def remove_master_theory(self, names, verbose = True):\n",
    "        \"\"\"Pop the given master theory name(s) from the hub; returns the popped tuples as an OrderedDict.\"\"\"\n",
    "        name_list = names if isinstance(names, list) else [names]\n",
    "        popped_master_theories = OrderedDict()\n",
    "        for name in name_list:\n",
    "            popped_master_theories[name] = self.master_theory_collection.pop(name)\n",
    "            if verbose:\n",
    "                print(\"Master theory {0} poped!\".format(name))\n",
    "        return popped_master_theories\n",
    "    \n",
    "    \n",
    "    def get_theory_tuples(self, input_size = None):\n",
    "        \"\"\"Return the stored theory tuples, optionally only those whose pred_net has the given input_size.\"\"\"\n",
    "        if input_size is None:\n",
    "            return self.theory\n",
    "        return OrderedDict((name, tup) for name, tup in self.theory.items()\n",
    "                           if tup.pred_net.input_size == input_size)\n",
    "\n",
    "\n",
    "    def get_all_models(self):\n",
    "        \"\"\"Return an OrderedDict mapping each stored theory name to its pred_net.\"\"\"\n",
    "        return OrderedDict((name, theory_tuple.pred_net)\n",
    "                           for name, theory_tuple in self.theory_collection.items())\n",
    "\n",
    "\n",
    "    def get_pred_nets(self, input_size = None):\n",
    "        \"\"\"Combine the stored pred_nets (optionally only those matching input_size) into one model ensemble.\"\"\"\n",
    "        selected = []\n",
    "        for theory_tuple in self.theory.values():\n",
    "            if input_size is None or theory_tuple.pred_net.input_size == input_size:\n",
    "                selected.append(theory_tuple.pred_net)\n",
    "        return construct_model_ensemble_from_nets(selected)\n",
    "\n",
    "\n",
    "    def get_loss(self):\n",
    "        \"\"\"Print and return the train/test MSE of every stored theory as {name: [mse_train, mse_test]}.\"\"\"\n",
    "        mse_dict = OrderedDict()\n",
    "        for name, theory_tuple in self.theory_collection.items():\n",
    "            mse_train, mse_test = theory_tuple.get_loss()\n",
    "            mse_dict[name] = [mse_train, mse_test]\n",
    "            print(\"Theory {0}: mse_train = {1:.9}\\tmse_test = {2:.9}\".format(name, mse_train, mse_test))\n",
    "        return mse_dict\n",
    "    \n",
    "    \n",
    "    def plot_theory(self, DL_rank = None, theory_name = None, **kwargs):\n",
    "        \"Plot simplified theory based on rank or theory_name\"\n",
    "        # Use the cached df_sympy dataframe if present; otherwise compute it.\n",
    "        if not hasattr(self, \"df_sympy\"):\n",
    "            df = self.get_df_sympy()\n",
    "        else:\n",
    "            df = self.df_sympy\n",
    "        if DL_rank is not None:\n",
    "            # Select the theory at position DL_rank in the dataframe's current order.\n",
    "            item = df.iloc[DL_rank]\n",
    "            theory_name = item[\"theory_name\"]\n",
    "        else:\n",
    "            assert theory_name is not None\n",
    "            df = df.set_index(\"theory_name\")\n",
    "            item = df.loc[theory_name]\n",
    "        print(\"pred_net_DL: {0}\".format(item[\"pred_net_DL\"]))\n",
    "        print(\"numerical_expression: {0}\".format(item[\"numerical_expression\"]))\n",
    "        print(\"mse_train: {0}\\tmse_test: {1}\".format(item[\"mse_train\"], item[\"mse_test\"]))\n",
    "        theory = self.theory[theory_name]\n",
    "        theory.plot(**kwargs)\n",
    "\n",
    "\n",
    "    def plot(self, target = \"theory\", keys = None, is_train = True):\n",
    "        \"\"\"Plot the stored theories or master theories, optionally restricted to the given keys.\"\"\"\n",
    "        if target == \"theory\":\n",
    "            for name, theory in self.theory.items():\n",
    "                if keys is not None and name not in keys:\n",
    "                    continue\n",
    "                if \"simplified\" in name:\n",
    "                    theory.pred_net.get_sympy_expression()\n",
    "                mse_train, mse_test = theory.get_loss()\n",
    "                print(\"{0}\\tmse_train: {1:.9f}\\tmse_test: {2:.9f}\".format(name, mse_train, mse_test))\n",
    "                theory.plot(is_train = is_train)\n",
    "        elif target == \"master_theory\":\n",
    "            for name, master_theory in self.master_theory.items():\n",
    "                if keys is not None and name not in keys:\n",
    "                    continue\n",
    "                print(\"{0}:\".format(name))\n",
    "                master_theory.plot(is_train = is_train)\n",
    "        else:\n",
    "            raise Exception(\"target {0} not recognized!\".format(target))\n",
    "\n",
    "    \n",
    "    def combine_pred_domain_nets(self, theory_tuples):\n",
    "        \"\"\"Combine the pred_nets and domain_nets of the selected theories into two model ensembles.\n",
    "\n",
    "        theory_tuples may be a dict of theory tuples, a list of theory names, or the string \"all\".\n",
    "        \"\"\"\n",
    "        if isinstance(theory_tuples, list):\n",
    "            theory_tuples = {name: self.theory[name] for name in theory_tuples}\n",
    "        elif isinstance(theory_tuples, str):\n",
    "            if theory_tuples == \"all\":\n",
    "                theory_tuples = self.theory_collection\n",
    "        tuples = list(theory_tuples.values())\n",
    "        pred_nets = construct_model_ensemble_from_nets([t.pred_net for t in tuples])\n",
    "        domain_net = construct_model_ensemble_from_nets([t.domain_net for t in tuples])\n",
    "        return pred_nets, domain_net\n",
    "\n",
    "\n",
    "    def combine_datasets(self, theory_tuples):\n",
    "        \"\"\"Concatenate the datasets of the selected theory tuples into one combined dataset.\n",
    "\n",
    "        theory_tuples may be a dict of theory tuples, a list of theory names, or the string \"all\".\n",
    "        NOTE(review): the returned info comes from the last theory tuple iterated -- confirm the\n",
    "        info dicts are interchangeable across tuples.\n",
    "        \"\"\"\n",
    "        if isinstance(theory_tuples, list):\n",
    "            theory_tuples = {name: self.theory[name] for name in theory_tuples}\n",
    "        elif isinstance(theory_tuples, str):\n",
    "            if theory_tuples == \"all\":\n",
    "                theory_tuples = self.theory_collection\n",
    "        \n",
    "        X_train_list = []\n",
    "        X_test_list = []\n",
    "        y_train_list = []\n",
    "        y_test_list = []\n",
    "        reflect_train_list = []\n",
    "        reflect_test_list = []\n",
    "        for name, theory_tuple in theory_tuples.items():\n",
    "            ((X_train, y_train), (X_test, y_test), (reflect_train, reflect_test)), info = theory_tuple.dataset\n",
    "            X_train_list.append(X_train)\n",
    "            X_test_list.append(X_test)\n",
    "            y_train_list.append(y_train)\n",
    "            y_test_list.append(y_test)\n",
    "            if reflect_train is not None:\n",
    "                reflect_train_list.append(reflect_train)\n",
    "            if reflect_test is not None:\n",
    "                reflect_test_list.append(reflect_test)\n",
    "        \n",
    "        # Concatenate along the example dimension; reflect tensors are optional.\n",
    "        X_train_combined = torch.cat(X_train_list, 0)\n",
    "        X_test_combined = torch.cat(X_test_list, 0)\n",
    "        y_train_combined = torch.cat(y_train_list, 0)\n",
    "        y_test_combined = torch.cat(y_test_list, 0)\n",
    "        if len(reflect_train_list) > 0:\n",
    "            reflect_train_combined = torch.cat(reflect_train_list, 0)\n",
    "        else:\n",
    "            reflect_train_combined = None\n",
    "        if len(reflect_test_list) > 0:\n",
    "            reflect_test_combined = torch.cat(reflect_test_list, 0)\n",
    "        else:\n",
    "            reflect_test_combined = None\n",
    "        dataset = ((X_train_combined, y_train_combined), (X_test_combined, y_test_combined), (reflect_train_combined, reflect_test_combined)), info\n",
    "        return dataset\n",
    "\n",
    "\n",
    "    def propose_theory_models(self, X, y, source = [\"master_theory\", \"theory\"], types = [\"neural\", \"simplified\"], fraction_threshold = 1e-4, max_num_models = None, loss_core = \"mse\", is_Lagrangian = False, isplot = False, verbose = True):\n",
    "        \"\"\"Propose candidate theory models for data (X, y), drawn from the master theories and/or the\n",
    "        stored theory collection, keeping only candidates that are the best predictor on more than\n",
    "        fraction_threshold of the points (at most max_num_models of them, sorted by that fraction).\n",
    "        Returns (proposed_theory_models_sorted, evaluation), or ({}, {}) when nothing qualifies.\n",
    "        \"\"\"\n",
    "        if len(self.theory) == 0 and len(self.master_theory) == 0:\n",
    "            return {}, {}\n",
    "        theory_models = OrderedDict()\n",
    "        proposed_theory_models = OrderedDict()\n",
    "        input_size = X.size(1)\n",
    "\n",
    "        # Add candidate theories from master_theories:\n",
    "        if \"master_theory\" in source:\n",
    "            for name, master_theory_tuple in self.master_theory.items():\n",
    "                theory_model = master_theory_tuple.master_theory.propose_theory_model_from_data(X, y)\n",
    "                if theory_model is not None:\n",
    "                    theory_models[name] = theory_model\n",
    "\n",
    "        # Add candidate theories from theory collections:\n",
    "        if \"theory\" in source:\n",
    "            for name, theory_tuple in self.theory.items():\n",
    "                if \"simplified\" not in types:\n",
    "                    if \"simplified\" in name:\n",
    "                        continue\n",
    "                if theory_tuple.pred_net.input_size == input_size:\n",
    "                    theory_models[name] = theory_tuple.pred_net\n",
    "        \n",
    "        if len(theory_models) == 0:\n",
    "            return {}, {}\n",
    "\n",
    "        # Propose theories models whose fraction_best exceeds the fraction_threshold:\n",
    "        fraction_best_list = []\n",
    "        theory_name_list = []\n",
    "        pred_nets_combined = construct_model_ensemble_from_nets(list(theory_models.values()))\n",
    "        if is_Lagrangian:\n",
    "            preds = get_Lagrangian_loss(pred_nets_combined, X)\n",
    "        else:\n",
    "            preds = pred_nets_combined(X)\n",
    "        loss_fun_cumu = Loss_Fun_Cumu(core = loss_core, cumu_mode = \"mean\")\n",
    "        # Per-example per-model loss; best_id marks which candidate wins on each example:\n",
    "        loss_indi = loss_fun_cumu(preds, y, cumu_mode = \"original\", neglect_threshold_on = False, is_mean = False)\n",
    "        best_id = to_one_hot(loss_indi.min(1)[1], loss_indi.size(1))\n",
    "        fraction_best = best_id.sum(0).float() / float(loss_indi.size(0))\n",
    "        if fraction_best.is_cuda:\n",
    "            fraction_best = fraction_best.cpu()\n",
    "        fraction_best = fraction_best.data.numpy()\n",
    "        # NOTE(review): fraction_best is a numpy array here, so fraction_best.data[i] below indexes\n",
    "        # its memoryview; fraction_best[i] would be the conventional form.\n",
    "        for i, name in enumerate(list(theory_models.keys())):\n",
    "            if fraction_best[i] > fraction_threshold:\n",
    "                loss_mean = to_np_array(torch.masked_select(loss_indi[:,i], best_id[:,i].byte()).mean())\n",
    "                proposed_theory_models[name] = {\"theory_model\": theory_models[name], \"fraction_best\": fraction_best.data[i], \"loss_mean\": loss_mean}\n",
    "                theory_name_list.append(name)\n",
    "                fraction_best_list.append(fraction_best[i])\n",
    "        fraction_best_list, theory_name_list = sort_two_lists(fraction_best_list, theory_name_list, reverse = True)\n",
    "\n",
    "        # Keep at most max_num_models candidates, in decreasing order of fraction_best:\n",
    "        k = 0\n",
    "        proposed_theory_models_sorted = OrderedDict()\n",
    "        for name in theory_name_list:\n",
    "            if max_num_models is not None and k >= max_num_models:\n",
    "                break\n",
    "            else:\n",
    "                proposed_theory_models_sorted[name] = proposed_theory_models[name]\n",
    "                k += 1\n",
    "\n",
    "        if len(proposed_theory_models_sorted) == 0:\n",
    "            return {}, {}\n",
    "        # Re-evaluate the kept candidates together on the full data:\n",
    "        proposed_pred_nets = construct_model_ensemble_from_nets([theory_info[\"theory_model\"] for theory_info in proposed_theory_models_sorted.values()])\n",
    "        if is_Lagrangian:\n",
    "            preds = get_Lagrangian_loss(proposed_pred_nets, X)\n",
    "        else:\n",
    "            preds = proposed_pred_nets(X)\n",
    "        loss_indi = loss_fun_cumu(preds, y, cumu_mode = \"original\", neglect_threshold_on = False, is_mean = False)\n",
    "        best_id = to_one_hot(loss_indi.min(1)[1], loss_indi.size(1))\n",
    "        evaluation = {\"loss_best\": to_np_array(loss_fun_cumu(preds, y, cumu_mode = \"min\")),\n",
    "                      \"pred-based_generalized-mean_-1\": to_np_array(loss_fun_cumu(preds, y, cumu_mode = \"harmonic\"))}\n",
    "        if verbose:\n",
    "            print(\"loss_best: {0:.9f}\\tharmonic loss: {1:.9f}\".format(evaluation[\"loss_best\"], evaluation[\"pred-based_generalized-mean_-1\"]))\n",
    "            for key, item in proposed_theory_models_sorted.items():\n",
    "                print(\"{0}:\\tfraction = {1:.5f}\\tloss_mean = {2:.9f}\".format(key, item[\"fraction_best\"], item[\"loss_mean\"]))\n",
    "        if isplot:\n",
    "            print(\"target:\")\n",
    "            plot3D(X, y)\n",
    "            print(\"proposed best_prediction:\")\n",
    "            plot3D(X, preds, best_id)\n",
    "\n",
    "        return proposed_theory_models_sorted, evaluation\n",
    "\n",
    "\n",
    "    def propose_master_theory(\n",
    "        self,\n",
    "        input_size,\n",
    "        output_size,\n",
    "        statistics_output_neurons,\n",
    "        master_model_type=\"regulated-Net\",\n",
    "        symbolic_expression=None,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Build a Master_Theory (statistics net + classifier) and propose its master model from the\n",
    "        hub's current theory collection.\n",
    "        \"\"\"\n",
    "        pre_pooling_neurons = kwargs.get(\"pre_pooling_neurons\", 100)\n",
    "        # Statistics net: 3 hidden layers before pooling, then a linear head of statistics_output_neurons.\n",
    "        struct_param_pre = [\n",
    "            [64, \"Simple_Layer\", {}],\n",
    "            [64, \"Simple_Layer\", {}],\n",
    "            [64, \"Simple_Layer\", {}],\n",
    "            [pre_pooling_neurons, \"Simple_Layer\", {\"activation\": \"linear\"}],\n",
    "        ]\n",
    "        struct_param_post = [\n",
    "            [64, \"Simple_Layer\", {}],\n",
    "            [64, \"Simple_Layer\", {}],\n",
    "            [statistics_output_neurons, \"Simple_Layer\", {\"activation\": \"linear\"}],\n",
    "        ]\n",
    "        struct_param_statistics_Net = [struct_param_pre, struct_param_post]\n",
    "        # Classifier: 3 hidden layers of 128 units, 2-way linear output.\n",
    "        struct_param_classifier = [\n",
    "            [128, \"Simple_Layer\", {}],\n",
    "            [128, \"Simple_Layer\", {}],\n",
    "            [128, \"Simple_Layer\", {}],\n",
    "            [2, \"Simple_Layer\", {\"activation\": \"linear\"}],\n",
    "        ]\n",
    "        master_theory = Master_Theory(\n",
    "            input_size=input_size,\n",
    "            output_size=output_size,\n",
    "            pre_pooling_neurons=pre_pooling_neurons,\n",
    "            struct_param_statistics_Net=struct_param_statistics_Net,\n",
    "            struct_param_classifier=struct_param_classifier,\n",
    "            is_cuda=self.is_cuda,\n",
    "        )\n",
    "        master_theory.propose_master_model(\n",
    "            theory_collection=self.theory_collection,\n",
    "            input_size=input_size,\n",
    "            statistics_output_neurons=statistics_output_neurons,\n",
    "            master_model_type=master_model_type,\n",
    "            symbolic_expression=symbolic_expression,\n",
    "            **kwargs\n",
    "        )\n",
    "        return master_theory\n",
    "\n",
    "\n",
    "    def propose_master_theories(\n",
    "        self,\n",
    "        num_master_theories,\n",
    "        input_size,\n",
    "        output_size,\n",
    "        statistics_output_neurons,\n",
    "        master_model_type=\"regulated-Net\",\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Propose num_master_theories master theories of the requested master_model_type.\n",
    "        For the \"symbolic\" type, the stored theories are first unified symbolically into\n",
    "        num_master_theories clusters and each master theory is built around one unified expression.\n",
    "        \"\"\"\n",
    "        master_theory_dict = OrderedDict()\n",
    "        if master_model_type in [\"regulated-Net\", \"generative_Net\"]:\n",
    "            for i in range(num_master_theories):\n",
    "                master_theory_dict[\"master_{0}\".format(i)] = self.propose_master_theory(\n",
    "                    input_size = input_size,\n",
    "                    output_size = output_size,\n",
    "                    statistics_output_neurons= statistics_output_neurons,\n",
    "                    master_model_type = master_model_type,\n",
    "                    **kwargs\n",
    "                   )\n",
    "        elif master_model_type == \"symbolic\":\n",
    "            df_sympy, exprs_unified_list = unification_symbolic(\n",
    "                self.theory_collection,\n",
    "                num_clusters=num_master_theories,\n",
    "            )\n",
    "            for i in range(num_master_theories):\n",
    "                master_theory_dict[\"master_{0}\".format(i)] = self.propose_master_theory(\n",
    "                    input_size=input_size,\n",
    "                    output_size=output_size,\n",
    "                    statistics_output_neurons=max(1, len(get_param_name_list(exprs_unified_list[i]))), # The statistics net outputs the parameters for the symbolic network\n",
    "                    master_model_type=master_model_type,\n",
    "                    symbolic_expression=exprs_unified_list[i],\n",
    "                    **kwargs\n",
    "                   )\n",
    "        else:\n",
    "            raise Exception(\"master_model_type {} is not valid!\".format(master_model_type))\n",
    "        return master_theory_dict\n",
    "\n",
    "\n",
    "    def fit_master_theory(\n",
    "        self,\n",
    "        master_theory_dict,\n",
    "        theory_dict,\n",
    "        theory_dict_test = None,\n",
    "        optim_type = (\"adam\", 1e-4),\n",
    "        reg_dict = {\"master_model\": {\"weight\": 1e-6, \"bias\": 1e-6},\n",
    "                    \"statistics_Net\": {\"weight\": 1e-6, \"bias\": 1e-6}},\n",
    "        loss_core = \"huber\",\n",
    "        loss_mode = \"harmonic\",\n",
    "        loss_combine_mode = \"on-loss\",\n",
    "        num_iter = 5000,\n",
    "        inspect_interval = 50,\n",
    "        patience = 20,\n",
    "        isplot = False,\n",
    "        filename = None,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Jointly fit the master_model and statistics_Net of every master theory to the given theories.\n",
    "\n",
    "        Minimizes Master_Loss_Fun plus an L1 regularization term, inspecting (and optionally plotting)\n",
    "        the master-by-theory loss matrix every inspect_interval iterations, with optional early\n",
    "        stopping on the test loss. Returns a deepcopy of the recorded training statistics.\n",
    "\n",
    "        Fix: test_best is now derived from loss_matrix_test; it was previously derived from\n",
    "        loss_matrix_train, so the test plot was annotated with the train assignment.\n",
    "        \"\"\"\n",
    "        if not isinstance(master_theory_dict, dict):\n",
    "            master_theory_dict = {\"master_0\": master_theory_dict}\n",
    "        self.optim_type = optim_type\n",
    "        self.reg_dict = reg_dict\n",
    "        self.loss_core = loss_core\n",
    "        self.loss_mode = loss_mode\n",
    "        # Optimize the master_model and statistics_Net parameters of all master theories jointly:\n",
    "        params = itertools.chain(*[master_theory.get_parameters(targets = [\"master_model\", \"statistics_Net\"]) \\\n",
    "                                                       for master_theory in master_theory_dict.values()])\n",
    "\n",
    "        self.master_loss_fun = Master_Loss_Fun(core = self.loss_core, cumu_mode = self.loss_mode, loss_combine_mode = loss_combine_mode)\n",
    "        if self.optim_type[0] == \"LBFGS\":\n",
    "            self.optimizer = torch.optim.LBFGS(params, lr = self.optim_type[1])\n",
    "        else:\n",
    "            if self.optim_type[0] == \"adam\":\n",
    "                self.optimizer = torch.optim.Adam(params, lr = self.optim_type[1])\n",
    "            elif self.optim_type[0] == \"RMSprop\":\n",
    "                self.optimizer = torch.optim.RMSprop(params, lr = self.optim_type[1])\n",
    "            else:\n",
    "                raise Exception(\"optim_type {0} not recognized!\".format(self.optim_type[0]))\n",
    "\n",
    "        if patience is not None:\n",
    "            early_stopping = Early_Stopping(patience = patience)\n",
    "        to_stop = False\n",
    "        self.data_record = {}\n",
    "        # NOTE(review): (len(theory_dict) + len(theory_dict)) doubles the same count -- possibly\n",
    "        # intended to include theory_dict_test; kept as-is to preserve behavior.\n",
    "        images_per_row = int(max(1, int(5 / float(max(1, (len(theory_dict) + len(theory_dict)) / float(16))))))\n",
    "        theory_dict_to_test = theory_dict_test if theory_dict_test is not None else theory_dict\n",
    "\n",
    "        print(\"Each m x n matrix shows the loss of m master_theories fitting to n theories.\")\n",
    "        for i in range(num_iter + 1):\n",
    "            self.optimizer.zero_grad()\n",
    "            loss_train = self.master_loss_fun(master_theory_dict, theory_dict)\n",
    "            reg = torch.cat([get_reg(master_theory, reg_dict, mode = \"L1\") for master_theory in master_theory_dict.values()], 0).sum()\n",
    "            loss = loss_train + reg\n",
    "            loss.backward()\n",
    "            self.optimizer.step()\n",
    "\n",
    "            if np.isnan(to_np_array(loss)):\n",
    "                raise Exception(\"NaN encountered!\")\n",
    "            if i % inspect_interval == 0:\n",
    "                # Recompute train/test losses (and their per-pair loss matrices) for inspection:\n",
    "                loss_train = self.master_loss_fun(master_theory_dict, theory_dict)\n",
    "                loss_matrix_train = self.master_loss_fun.loss_matrix\n",
    "                loss_test = self.master_loss_fun(master_theory_dict, theory_dict_to_test, use_train = False)\n",
    "                loss_matrix_test = self.master_loss_fun.loss_matrix\n",
    "                record_data(self.data_record, [to_np_array(loss_train), to_np_array(loss_test), to_np_array(loss_matrix_train), to_np_array(loss_matrix_test)], \n",
    "                                              [\"loss_train\", \"loss_test\", \"loss_matrix_train\", \"loss_matrix_test\"])\n",
    "                print(\"iter {0}  \\tloss_train: {1:.9f} \\tloss_test: {2:.9f} \\treg: {3:.9f}\".format(i, to_np_array(loss_train), to_np_array(loss_test), to_np_array(reg)))\n",
    "                if patience is not None:\n",
    "                    to_stop = early_stopping.monitor(to_np_array(loss_test))\n",
    "\n",
    "                if loss_matrix_train.size(0) > 1:\n",
    "                    train_best = to_np_array(loss_matrix_train.min(0)[1]).tolist()\n",
    "                    print(\"train best:\", train_best)\n",
    "                else:\n",
    "                    train_best = None\n",
    "                if isplot:\n",
    "                    x_axis_list_core = \"loss_train: {0:.4f}   loss_test: {1:.4f}   reg: {2:.5f}\".format(to_np_array(loss_train), to_np_array(loss_test), to_np_array(reg))\n",
    "                    x_axis_list = x_axis_list_core + \"\\n{0}\".format(train_best) if train_best is not None and len(train_best) < 10 else x_axis_list_core\n",
    "                    self.master_loss_fun.plot_loss_matrix(images_per_row = images_per_row, x_axis_list = [x_axis_list],\n",
    "                                                          filename = filename + \"_{0}_train.png\".format(i) if filename is not None else None)\n",
    "                if loss_matrix_test.size(0) > 1:\n",
    "                    # Fixed: use the test loss matrix (was loss_matrix_train).\n",
    "                    test_best = loss_matrix_test.min(0)[1].cpu().data.numpy().tolist()\n",
    "                    print(\"test best:\", loss_matrix_test.min(0)[1].cpu().data.numpy().tolist())\n",
    "                else:\n",
    "                    test_best = None\n",
    "                if isplot:\n",
    "                    x_axis_list = x_axis_list_core + \"\\n{0}\".format(test_best) if test_best is not None and len(test_best) < 10 else x_axis_list_core  \n",
    "                    self.master_loss_fun.plot_loss_matrix(images_per_row = images_per_row, x_axis_list = [x_axis_list],\n",
    "                                                          filename = filename + \"_{0}_test.png\".format(i) if filename is not None else None)\n",
    "                try:\n",
    "                    sys.stdout.flush()\n",
    "                except Exception:\n",
    "                    # Best-effort flush; never let output flushing abort training.\n",
    "                    pass\n",
    "                if to_stop:\n",
    "                    print(\"Early stopping at iteration {0}\".format(i))\n",
    "                    break\n",
    "        return deepcopy(self.data_record)\n",
    "    \n",
    "    \n",
    "    def assign_master_theories_to_theories(self, master_theory_dict, theory_dict):\n",
    "        \"\"\"Assign each theory to the master theory that currently fits it best.\n",
    "\n",
    "        Builds the (num_masters x num_theories) loss matrix, takes the argmin\n",
    "        over masters for every theory, and groups the theories by their winning\n",
    "        master. Returns a list of [assigned_master_theory_dict,\n",
    "        assigned_theory_dict] pairs, the format self.fit_master_theory() expects.\n",
    "        \"\"\"\n",
    "        loss_matrix = self.master_loss_fun.get_loss_matrix(master_theory_dict, theory_dict, loss_combine_mode = \"on-loss\")\n",
    "        winner_ids = loss_matrix.min(0)[1]\n",
    "        master_names = list(master_theory_dict.keys())\n",
    "        groups = OrderedDict()\n",
    "        for idx, theory_name in enumerate(theory_dict.keys()):\n",
    "            winner_name = master_names[int(winner_ids.data[idx])]\n",
    "            if winner_name not in groups:\n",
    "                groups[winner_name] = {\"master_theory\": master_theory_dict[winner_name], \"assigned_theory_dict\": OrderedDict()}\n",
    "            groups[winner_name][\"assigned_theory_dict\"][theory_name] = theory_dict[theory_name]\n",
    "        # Repackage into [master_dict, theory_dict] pairs for self.fit_master_theory():\n",
    "        group_list = []\n",
    "        for winner_name, group in groups.items():\n",
    "            group_list.append([{winner_name: group[\"master_theory\"]}, group[\"assigned_theory_dict\"]])\n",
    "        return group_list\n",
    "\n",
    "\n",
    "    def fit_master_classifier_multi(\n",
    "        self,\n",
    "        group_list,\n",
    "        optim_type_classifier = (\"adam\", 5e-5),\n",
    "        reg_dict_classifier = {\"classifier\": {\"weight\": 1e-3, \"bias\": 1e-3}},\n",
    "        patience = 20,\n",
    "        ):\n",
    "        \"\"\"Fit a domain classifier for every (master_theory_dict, theory_dict) group.\n",
    "\n",
    "        Not implemented: the commented-out sketch below suggests it was meant to\n",
    "        loop over group_list and run the single-group path (cf.\n",
    "        self.fit_master_classifier) -- TODO(review): confirm intended behavior.\n",
    "        \"\"\"\n",
    "#         for master_theory_dict, theory_dict in group_list:\n",
    "#             master_theory = master_theory_dict[list(master_theory_dict.keys())[0]]\n",
    "#             datasets = self.combine_datasets(theory_dict)\n",
    "        pass\n",
    "\n",
    "\n",
    "    def fit_master_classifier(\n",
    "        self,\n",
    "        master_theory,\n",
    "        theory_dict,\n",
    "        optim_type_classifier = (\"adam\", 5e-5),\n",
    "        reg_dict_classifier = {\"classifier\": {\"weight\": 1e-3, \"bias\": 1e-3}},\n",
    "        patience = 20,\n",
    "        ):\n",
    "        \"\"\"Train master_theory.classifier to recognize the domain of the given theories.\n",
    "\n",
    "        Builds a binary dataset: inputs from theory_dict get label 1 (\"chosen\");\n",
    "        inputs from every other theory in self.theory with the same input size\n",
    "        get label 0 (\"excluded\"). Trains with class-weighted cross-entropy plus\n",
    "        L1 regularization, early-stopping on the test loss.\n",
    "\n",
    "        Args:\n",
    "            master_theory: master theory supplying .input_size and the .classifier to train.\n",
    "            theory_dict: {name: theory_tuple} of theories whose domain is the positive class.\n",
    "            optim_type_classifier: (optimizer_name, lr); one of \"adam\", \"RMSprop\", \"LBFGS\".\n",
    "            reg_dict_classifier: {net_name: {param_type: amplitude}} passed to get_reg (L1).\n",
    "            patience: early-stopping patience, counted in inspection intervals.\n",
    "        \"\"\"\n",
    "        self.optim_type_classifier = optim_type_classifier\n",
    "        self.reg_dict_classifier = reg_dict_classifier\n",
    "        input_size = master_theory.input_size\n",
    "\n",
    "        # Positives: data of the chosen theories. Negatives: all other theories\n",
    "        # in the hub whose pred_net has the same input size.\n",
    "        dataset_chosen = self.combine_datasets(theory_dict)\n",
    "        dataset_excluded = self.combine_datasets({name: theory_tuple for name, theory_tuple in self.theory.items() if name not in theory_dict and theory_tuple.pred_net.input_size == input_size})\n",
    "        ((X_train_chosen, y_train_chosen), (X_test_chosen, y_test_chosen), _), _ = dataset_chosen\n",
    "        ((X_train_excluded, y_train_excluded), (X_test_excluded, y_test_excluded), _), _ = dataset_excluded\n",
    "        X_train_combined = torch.cat([X_train_chosen, X_train_excluded], 0)\n",
    "        X_test_combined = torch.cat([X_test_chosen, X_test_excluded], 0)\n",
    "        # Binary labels: 1 = chosen, 0 = excluded.\n",
    "        u_train_combined = Variable(torch.cat([torch.ones(X_train_chosen.size(0)), torch.zeros(X_train_excluded.size(0))], 0).long().unsqueeze(1), requires_grad = False)\n",
    "        u_test_combined = Variable(torch.cat([torch.ones(X_test_chosen.size(0)), torch.zeros(X_test_excluded.size(0))], 0).long().unsqueeze(1), requires_grad = False)\n",
    "        batch_gen = Batch_Generator(X_train_combined, u_train_combined, batch_size = 128)\n",
    "        batch_gen_test = Batch_Generator(X_test_combined, u_test_combined, batch_size = 2000)\n",
    "\n",
    "        params = master_theory.classifier.parameters()\n",
    "        if self.optim_type_classifier[0] == \"LBFGS\":\n",
    "            # NOTE(review): torch.optim.LBFGS.step() requires a closure argument;\n",
    "            # the plain .step() call in the loop below may fail on this path -- confirm.\n",
    "            self.optimizer_classifier = torch.optim.LBFGS(params, lr = self.optim_type_classifier[1])\n",
    "            num_iter = 5000\n",
    "            inspect_interval = 50\n",
    "        else:\n",
    "            num_iter = 30000\n",
    "            inspect_interval = 50\n",
    "            if self.optim_type_classifier[0] == \"adam\":\n",
    "                self.optimizer_classifier = torch.optim.Adam(params, lr = self.optim_type_classifier[1])\n",
    "            elif self.optim_type_classifier[0] == \"RMSprop\":\n",
    "                self.optimizer_classifier = torch.optim.RMSprop(params, lr = self.optim_type_classifier[1])\n",
    "            else:\n",
    "                raise Exception(\"optim_type {0} not recognized!\".format(self.optim_type_classifier[0]))\n",
    "\n",
    "        early_stopping = Early_Stopping(patience = patience)\n",
    "        to_stop = False\n",
    "        self.data_record_classifier = {}\n",
    "        # Class weights counteract imbalance: each class is weighted by the other\n",
    "        # class's fraction of the training data, so the rarer class weighs more.\n",
    "        ratio = X_train_chosen.size(0) / float(X_train_chosen.size(0) + X_train_excluded.size(0))\n",
    "        weight = torch.FloatTensor(np.array([ratio, 1 - ratio]))\n",
    "        if self.is_cuda:\n",
    "            weight = weight.cuda()\n",
    "\n",
    "        for i in range(num_iter + 1):\n",
    "            X_batch, u_batch = batch_gen.next_batch(isTorch = True, is_cuda = self.is_cuda)\n",
    "            self.optimizer_classifier.zero_grad()\n",
    "            pred_batch = master_theory.classifier(X_batch)\n",
    "            loss_train = nn.CrossEntropyLoss(weight = weight)(pred_batch, u_batch.long().view(-1))\n",
    "            reg = get_reg(master_theory, self.reg_dict_classifier, \"L1\")\n",
    "            loss = loss_train + reg\n",
    "            loss.backward()\n",
    "            self.optimizer_classifier.step()\n",
    "\n",
    "            # Test loss is computed every iteration but only recorded (and used\n",
    "            # for early stopping) at inspection intervals below.\n",
    "            X_batch_test, u_batch_test = batch_gen_test.next_batch(isTorch = True, is_cuda = self.is_cuda)\n",
    "            pred_batch_test = master_theory.classifier(X_batch_test)\n",
    "            loss_test = nn.CrossEntropyLoss(weight = weight)(pred_batch_test, u_batch_test.long().view(-1))\n",
    "            if np.isnan(to_np_array(loss)):\n",
    "                raise Exception(\"NaN encountered!\")\n",
    "            if i % inspect_interval == 0:\n",
    "                to_stop = early_stopping.monitor(to_np_array(loss_test))\n",
    "                record_data(self.data_record_classifier, [to_np_array(loss_train), to_np_array(loss_test)], [\"loss_train\", \"loss_test\"])\n",
    "                print(\"Classifier iter {0}  \\tloss_train: {1:.9f} \\tloss_test: {2:.9f} \\treg: {3:.9f}\".format(i, to_np_array(loss_train), to_np_array(loss_test), to_np_array(reg)))\n",
    "                if to_stop:\n",
    "                    print(\"Early stopping at iteration {0}\".format(i))\n",
    "                    break\n",
    "\n",
    "\n",
    "def get_regulated_latent_param(master_model, latent_param):\n",
    "    assert len(latent_param.view(-1)) == len(master_model.struct_param) * 2 or len(latent_param.view(-1)) == len(master_model.struct_param)\n",
    "    if len(latent_param.view(-1)) == len(master_model.struct_param) * 2:\n",
    "        latent_param = {i: latent_param.view(-1)[2*i: 2*i+2] for i in range(len(master_model.struct_param))}\n",
    "    else:\n",
    "        latent_param = {i: latent_param.view(-1)[i:i+1] for i in range(len(master_model.struct_param))}\n",
    "    return latent_param\n",
    "\n",
    "\n",
    "def get_reg(master_theory, reg_dict, mode = \"L1\"):   \n",
    "    reg = Variable(torch.FloatTensor([0]), requires_grad = False)\n",
    "    if master_theory.is_cuda:\n",
    "        reg = reg.cuda()\n",
    "    for net_target, reg_setting in reg_dict.items():\n",
    "        for source_target, reg_amp in reg_setting.items():\n",
    "            reg = reg + master_theory.get_regularization(targets = [net_target], source = [source_target], mode = mode) * reg_amp\n",
    "    return reg\n",
    "\n",
    "\n",
    "# The following functions load theories, master theories and theory hub from file:\n",
    "def load_model_dict_at_theory_hub(model_dict, is_cuda = False):\n",
    "    if model_dict[\"type\"] == \"Theory_Tuple\":\n",
    "        return load_model_dict_theory_tuple(model_dict, is_cuda = is_cuda)\n",
    "    elif model_dict[\"type\"] == \"Master_Theory_Tuple\":\n",
    "        return load_model_dict_master_theory_tuple(model_dict, is_cuda = is_cuda)\n",
    "    elif model_dict[\"type\"] == \"Master_Theory\":\n",
    "        return load_model_dict_master_theory(model_dict, is_cuda = is_cuda)\n",
    "    elif model_dict[\"type\"] == \"Theory_Hub\":\n",
    "        return load_model_dict_theory_hub(model_dict, is_cuda = is_cuda)\n",
    "    else:\n",
    "        raise Exception(\"type {0} not recognized!\".format(model_dict[\"type\"]))\n",
    "\n",
    "\n",
    "def load_model_dict_theory_tuple(model_dict, is_cuda = False):\n",
    "    \"\"\"Rebuild a Theory_Tuple from its serialized dict.\"\"\"\n",
    "    # Older serialized dicts may predate the is_Lagrangian flag; default to False.\n",
    "    return Theory_Tuple(\n",
    "        pred_net = model_dict[\"pred_net\"],\n",
    "        domain_net = model_dict[\"domain_net\"],\n",
    "        dataset = model_dict[\"dataset\"],\n",
    "        is_Lagrangian = model_dict.get(\"is_Lagrangian\", False),\n",
    "        is_cuda = is_cuda,\n",
    "    )\n",
    "\n",
    "def load_model_dict_master_theory_tuple(model_dict, is_cuda = False):\n",
    "    \"\"\"Rebuild a Master_Theory_Tuple (a master theory plus its member theory tuples).\"\"\"\n",
    "    rebuilt_tuples = {}\n",
    "    for name, tuple_dict in model_dict[\"theory_tuples\"].items():\n",
    "        rebuilt_tuples[name] = load_model_dict_theory_tuple(tuple_dict, is_cuda = is_cuda)\n",
    "    master_theory = load_model_dict_master_theory(model_dict[\"master_theory\"], is_cuda = is_cuda)\n",
    "    return Master_Theory_Tuple(master_theory = master_theory,\n",
    "                               theory_tuples = rebuilt_tuples,\n",
    "                               is_cuda = is_cuda,\n",
    "                              )\n",
    "\n",
    "def load_model_dict_master_theory(model_dict, is_cuda = False):\n",
    "    \"\"\"Rebuild a Master_Theory and restore its sub-network weights.\"\"\"\n",
    "    stat_dict = model_dict[\"statistics_Net\"]\n",
    "    classifier_dict = model_dict[\"classifier\"]\n",
    "    master_theory = Master_Theory(input_size = model_dict[\"input_size\"],\n",
    "                                  pre_pooling_neurons = stat_dict[\"pre_pooling_neurons\"],\n",
    "                                  struct_param_statistics_Net = [stat_dict[\"struct_param_pre\"], stat_dict[\"struct_param_post\"]],\n",
    "                                  struct_param_classifier = classifier_dict[\"struct_param\"],\n",
    "                                  is_cuda = is_cuda,\n",
    "                                 )\n",
    "    if model_dict[\"master_model\"] is not None:\n",
    "        master_theory.master_model = load_model_dict_net(model_dict[\"master_model\"], is_cuda = is_cuda)\n",
    "    # master_model_type is optional in older serialized dicts:\n",
    "    if \"master_model_type\" in model_dict:\n",
    "        master_theory.master_model_type = model_dict[\"master_model_type\"]\n",
    "    master_theory.statistics_Net.load_model_dict(stat_dict)\n",
    "    master_theory.classifier.load_model_dict(classifier_dict)\n",
    "    return master_theory\n",
    "\n",
    "def load_model_dict_theory_hub(model_dict, is_cuda = False):\n",
    "    \"\"\"Rebuild a Theory_Hub, restoring both the theory and master-theory collections.\"\"\"\n",
    "    theory_hub = Theory_Hub(is_cuda = is_cuda)\n",
    "    for name, serialized in model_dict[\"theory_collection\"].items():\n",
    "        theory_hub.theory_collection[name] = load_model_dict_theory_tuple(serialized, is_cuda = is_cuda)\n",
    "    for name, serialized in model_dict[\"master_theory_collection\"].items():\n",
    "        theory_hub.master_theory_collection[name] = load_model_dict_master_theory_tuple(serialized, is_cuda = is_cuda)\n",
    "    return theory_hub\n",
    "\n",
    "\n",
    "def select_explained_data(model, X, y, threshold = 1e-4):\n",
    "    chosen_id = ((model(X) - y) ** 2 < threshold)\n",
    "    X_chosen = torch.masked_select(X, chosen_id.detach()).view(-1, X.size(1))\n",
    "    y_chosen = torch.masked_select(y, chosen_id.detach()).view(-1, y.size(1))\n",
    "    X_others = torch.masked_select(X, ~chosen_id.detach()).view(-1, X.size(1))\n",
    "    y_others = torch.masked_select(y, ~chosen_id.detach()).view(-1, y.size(1))\n",
    "    return (X_chosen, y_chosen), (X_others, y_others)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Loss function for unifying multiple theory into a network master theory:\n",
    "class Induce_Loss_Fun(nn.Module):\n",
    "    \"\"\"Loss for fitting master theories to the data of individual theories.\n",
    "\n",
    "    In \"on-loss\" mode, forward() returns one scalar loss per master theory.\n",
    "    In \"on-data\" mode, the per-master predictions are combined point-wise by\n",
    "    Loss_Fun_Cumu before reduction.\n",
    "    \"\"\"\n",
    "    def __init__(self, core = \"mse\", loss_combine_mode = \"on-loss\", cumu_mode = \"harmonic\"):\n",
    "        super(Induce_Loss_Fun, self).__init__()\n",
    "        self.core = core\n",
    "        self.loss_combine_mode = loss_combine_mode\n",
    "        # Always construct the cumulative loss, so that forward() can be called\n",
    "        # with loss_combine_mode=\"on-data\" even when the instance default is\n",
    "        # \"on-loss\" (previously this attribute only existed for \"on-data\",\n",
    "        # causing an AttributeError when the mode was overridden per call).\n",
    "        self.loss_fun_cumu = Loss_Fun_Cumu(core = core, cumu_mode = cumu_mode)\n",
    "        self.loss_fun = Loss_Fun(core = core)\n",
    "\n",
    "\n",
    "    def _get_master_pred(self, master_theory, X, target):\n",
    "        # Infer latent parameters for (X, target) and run the master model with them.\n",
    "        latent_param = master_theory.statistics_Net(torch.cat([X, target], 1))\n",
    "        if master_theory.master_model_type == \"symbolic\":\n",
    "            latent_param = {0: {param_name: latent_param[0, k] for k, param_name in enumerate(master_theory.param_name_list)}}\n",
    "        elif master_theory.master_model_type == \"regulated-Net\":\n",
    "            latent_param = get_regulated_latent_param(master_theory.master_model, latent_param)\n",
    "        return master_theory.master_model(X, latent_param)\n",
    "\n",
    "\n",
    "    def forward(self, master_theory_dict, theory_model, X, y = None, loss_combine_mode = None):\n",
    "        \"\"\"Return the loss column of all master theories w.r.t. one theory.\n",
    "\n",
    "        Args:\n",
    "            master_theory_dict: {name: master_theory} to evaluate.\n",
    "            theory_model: the theory's prediction net; provides the target when y is None.\n",
    "            X: input tensor.\n",
    "            y: optional target tensor; defaults to theory_model(X).\n",
    "            loss_combine_mode: \"on-loss\" or \"on-data\"; defaults to the instance setting.\n",
    "        \"\"\"\n",
    "        target = theory_model(X) if y is None else y\n",
    "        if loss_combine_mode is None:\n",
    "            loss_combine_mode = self.loss_combine_mode\n",
    "        if loss_combine_mode == \"on-loss\":\n",
    "            loss_column = torch.stack([self.loss_fun(self._get_master_pred(master_theory, X, target), target)\n",
    "                                       for master_theory in master_theory_dict.values()], 0)\n",
    "        elif loss_combine_mode == \"on-data\":\n",
    "            master_pred_list = torch.stack([self._get_master_pred(master_theory, X, target)\n",
    "                                            for master_theory in master_theory_dict.values()], 1)\n",
    "            loss_column = self.loss_fun_cumu(master_pred_list, target)\n",
    "        else:\n",
    "            raise Exception(\"loss_combine_mode {0} not recognized!\".format(loss_combine_mode))\n",
    "        return loss_column\n",
    "\n",
    "\n",
    "class Master_Loss_Fun(nn.Module):\n",
    "    \"\"\"Aggregate Induce_Loss_Fun across many theories into a loss matrix.\n",
    "\n",
    "    get_loss_matrix builds a (num_masters x num_theories) matrix of losses;\n",
    "    forward() reduces it to a scalar training loss according to cumu_mode.\n",
    "    \"\"\"\n",
    "    def __init__(self, core = \"mse\", cumu_mode = \"harmonic\", loss_combine_mode = \"on-loss\", epsilon = 1e-10):\n",
    "        # epsilon guards the division in the harmonic mean and the fractional\n",
    "        # power in the generalized mean against zero losses.\n",
    "        super(Master_Loss_Fun, self).__init__()\n",
    "        self.epsilon = epsilon\n",
    "        self.cumu_mode = cumu_mode\n",
    "        self.induce_loss_fun = Induce_Loss_Fun(core = core, loss_combine_mode = loss_combine_mode, cumu_mode = cumu_mode)\n",
    "\n",
    "\n",
    "    def get_loss_matrix(self, master_theory_dict, theory_dict, loss_combine_mode = None, use_train = True, use_target = True):\n",
    "        \"\"\"Build the (num_masters x num_theories) loss matrix.\n",
    "\n",
    "        For each theory, evaluates every master theory on the theory's train or\n",
    "        test split (use_train); with use_target=False the theory's own\n",
    "        predictions replace the recorded targets. The result is cached on\n",
    "        self.loss_matrix and also returned.\n",
    "        \"\"\"\n",
    "        loss_matrix = []\n",
    "        for theory_name, theory_tuple in theory_dict.items():\n",
    "            theory_model = theory_tuple.pred_net\n",
    "            ((X_train, y_train), (X_test, y_test), _), _ = theory_tuple.dataset\n",
    "            if use_train:\n",
    "                X = X_train\n",
    "                y = y_train if use_target else None\n",
    "            else:\n",
    "                X = X_test\n",
    "                y = y_test if use_target else None\n",
    "            loss_matrix.append(self.induce_loss_fun(master_theory_dict, theory_model, X, y, loss_combine_mode = loss_combine_mode))\n",
    "        self.loss_matrix = torch.stack(loss_matrix, 1)\n",
    "        return self.loss_matrix\n",
    "\n",
    "\n",
    "    def forward(self, master_theory_dict, theory_dict, use_train = True, use_target = True):\n",
    "        \"\"\"Reduce the loss matrix to a scalar loss according to self.cumu_mode.\n",
    "\n",
    "        With a single master theory the losses are simply summed; otherwise\n",
    "        the per-theory columns are combined across masters by a harmonic mean,\n",
    "        a minimum, or a generalized mean, then summed over theories.\n",
    "        \"\"\"\n",
    "        if not isinstance(master_theory_dict, dict):\n",
    "            master_theory_dict = {\"master_0\": master_theory_dict}\n",
    "        self.get_loss_matrix(master_theory_dict, theory_dict, use_train = use_train, use_target = use_target)\n",
    "        if self.loss_matrix.size(0) == 1:\n",
    "            loss = self.loss_matrix.sum()\n",
    "        else:\n",
    "            if self.cumu_mode == \"harmonic\":\n",
    "                loss = (self.loss_matrix.size(0) / (1 / (self.loss_matrix + self.epsilon)).sum(0)).sum()\n",
    "            elif self.cumu_mode == \"min\":\n",
    "                loss = self.loss_matrix.min(0)[0].sum()\n",
    "            # cumu_mode may also be a tuple (\"generalized-mean\", order):\n",
    "            elif self.cumu_mode[0] == \"generalized-mean\":\n",
    "                order = self.cumu_mode[1]\n",
    "                loss = ((((self.loss_matrix + self.epsilon) ** order).mean(0)) ** (1 / float(order))).sum()\n",
    "            else:\n",
    "                raise Exception(\"mode {0} not recognized!\".format(self.cumu_mode))\n",
    "        return loss\n",
    "\n",
    "\n",
    "    def plot_loss_matrix(self, master_theory_dict = None, theory_dict = None, loss_combine_mode = None, use_train = True, use_target = True, filename = None, **kwargs):\n",
    "        \"\"\"Plot log10 of the loss matrix; recomputes it only if inputs are given.\n",
    "\n",
    "        When called with no dict arguments, the matrix cached by the last\n",
    "        get_loss_matrix call is plotted.\n",
    "        \"\"\"\n",
    "        if master_theory_dict is not None or theory_dict is not None or loss_combine_mode is not None:\n",
    "            self.get_loss_matrix(master_theory_dict, theory_dict, loss_combine_mode = loss_combine_mode, use_train = use_train, use_target = use_target)\n",
    "        loss_matrix = self.loss_matrix\n",
    "        if loss_matrix.is_cuda:\n",
    "            loss_matrix = loss_matrix.cpu()\n",
    "        # NOTE(review): log10 of an exactly-zero loss yields -inf in the plot -- confirm acceptable.\n",
    "        plot_matrices([np.log10(loss_matrix.data.numpy())], filename = filename, **kwargs)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
