{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn.metrics import confusion_matrix\n",
    "import pickle\n",
    "import random\n",
    "from copy import deepcopy\n",
    "import datetime\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable, grad\n",
    "import torch.nn.functional as F\n",
    "import torch.utils.data as data_utils\n",
    "from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau\n",
    "\n",
    "import sys, os\n",
    "sys.path.append(os.path.join(os.path.dirname(\"__file__\"), '..', '..'))\n",
    "from AI_physicist.theory_learning.models import Loss_Fun_Cumu, get_Lagrangian_loss\n",
    "from AI_physicist.theory_learning.util_theory import forward, logplus, Loss_Decay_Scheduler, count_metrics_pytorch, plot3D, plot_indi_domain, to_one_hot, load_info_dict, get_piecewise_dataset, get_group_norm\n",
    "from AI_physicist.settings.filepath import theory_PATH\n",
    "from AI_physicist.settings.global_param import COLOR_LIST, PrecisionFloorLoss\n",
    "from AI_physicist.pytorch_net.util import Loss_Fun, make_dir, Early_Stopping, record_data, plot_matrices, get_args, base_repr, base_repr_2_int\n",
    "from AI_physicist.pytorch_net.util import sort_two_lists, to_string, Loss_with_uncertainty, get_optimizer, Gradient_Noise_Scale_Gen, to_np_array, to_Variable, to_Boolean, get_criterion\n",
    "from AI_physicist.pytorch_net.net import MLP, Model_Ensemble, load_model_dict, construct_model_ensemble_from_nets, train_simple"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_pred_with_uncertainty(preds, uncertainty_nets, X):\n",
    "    log_std = uncertainty_nets(X)\n",
    "    info_list = torch.exp(-2 * (F.relu(log_std + 20) - 20)) + 1e-20\n",
    "    pred_with_uncertainty = (preds * info_list).sum(1, keepdim = True) / info_list.sum(1, keepdim = True)\n",
    "    return pred_with_uncertainty, info_list\n",
    "\n",
    "\n",
    "def get_best_model_idx(net_dict, X, y, loss_fun_cumu, forward_steps = 1, mode = \"first_step\", is_Lagrangian = False):\n",
    "    \"\"\"Return, for each example, the index of the theory with the lowest individual loss.\n",
    "\n",
    "    With forward_steps > 1 the prediction columns enumerate all theory sequences in\n",
    "    base-num_theories order; mode \"first_step\" maps the argmin column back to the\n",
    "    theory used at the first step, while \"expanded\" returns the raw column index.\n",
    "    \"\"\"\n",
    "    preds, _ = get_preds_valid(net_dict, X, forward_steps = forward_steps, is_Lagrangian = is_Lagrangian)\n",
    "    # Per-example, per-theory losses (no reduction across examples):\n",
    "    loss_indi = loss_fun_cumu(preds, y, cumu_mode = \"original\", neglect_threshold_on = False, is_mean = False)\n",
    "    num_theories = net_dict[\"pred_nets\"].num_models\n",
    "    if mode == \"first_step\":\n",
    "        # The most significant base-num_theories digit of the column index is the\n",
    "        # theory chosen at the first step.\n",
    "        return loss_indi.min(1)[1] // num_theories ** (forward_steps - 1)\n",
    "    elif mode == \"expanded\":\n",
    "        return loss_indi.min(1)[1]\n",
    "    else:\n",
    "        raise Exception(\"mode {0} not recognized!\".format(mode))\n",
    "\n",
    "\n",
    "def get_valid_prob(X, domain_net, num_theories, domain_pred_mode = \"onehot\"):\n",
    "    if domain_net is not None:\n",
    "        if domain_pred_mode == \"prob\":\n",
    "            valid_prob = nn.Softmax(dim = 1)(domain_net(X))\n",
    "        elif domain_pred_mode == \"onehot\":\n",
    "            valid_prob = nn.Softmax(dim = 1)(domain_net(X)).max(1)[1]\n",
    "            valid_prob = to_one_hot(valid_prob, num = num_theories).float()\n",
    "        else:\n",
    "            raise Exception(\"domain_pred_mode {0} not recognized!\".format(domain_pred_mode))\n",
    "    else:\n",
    "        valid_prob = None\n",
    "    return valid_prob\n",
    "\n",
    "\n",
    "def get_preds_valid(net_dict, X, forward_steps = 1, domain_net = None, domain_pred_mode = \"onehot\", is_Lagrangian = False, disable_autoencoder = False):\n",
    "    \"\"\"Compute the predictions of all theories, optionally rolled out several steps.\n",
    "\n",
    "    Returns (preds, valid_prob): preds has one column per theory (or per theory\n",
    "    sequence when forward_steps > 1, enumerated in base-num_theories order) and\n",
    "    valid_prob holds the domain-net weights (None when domain_net is None).\n",
    "    If an autoencoder is present and X is 4D, prediction happens in latent space\n",
    "    and the result is decoded back at the end.\n",
    "    \"\"\"\n",
    "    if \"autoencoder\" in net_dict and len(X.shape) == 4 and not disable_autoencoder:\n",
    "        autoencoder = net_dict[\"autoencoder\"]\n",
    "        X = autoencoder.encode(X)\n",
    "    pred_nets = net_dict[\"pred_nets\"]\n",
    "    num_theories = pred_nets.num_models\n",
    "    if is_Lagrangian:\n",
    "        # The Lagrangian formulation is only defined for a single forward step.\n",
    "        assert forward_steps == 1\n",
    "        preds = get_Lagrangian_loss(pred_nets, X)\n",
    "    else:\n",
    "        preds = pred_nets(X)\n",
    "        \n",
    "    valid_prob = get_valid_prob(X, domain_net, num_theories = num_theories, domain_pred_mode = domain_pred_mode)\n",
    "    if forward_steps > 1:\n",
    "        # Multi-step rollout: feed each theory's prediction back in as the newest\n",
    "        # time-step of the input, branching over every theory choice at each step.\n",
    "        # Assumes X is flattened as (batch, input_dim * num_output_dims) with the\n",
    "        # most recent time-step last -- TODO confirm against the data pipeline.\n",
    "        num_output_dims = preds.size(-1)\n",
    "        input_dim = int(X.size(1) / num_output_dims)\n",
    "        pred_list_all = [preds]\n",
    "        for i in range(forward_steps - 1):\n",
    "            new_pred_list = []\n",
    "            valid_prob_list = []\n",
    "            for k in range(pred_list_all[-1].size(1)):\n",
    "                # Decode which sequence of theory choices column k corresponds to\n",
    "                # (digits of k in base num_theories, one digit per past step).\n",
    "                prev_idx = base_repr(k, num_theories, len(pred_list_all))\n",
    "                prev_len = len(prev_idx)\n",
    "                current_input = []\n",
    "                # Part of the input window still coming from the original X:\n",
    "                source_dim = input_dim - prev_len\n",
    "                if source_dim > 0:\n",
    "                    current_input.append(X[:, -source_dim * num_output_dims:].contiguous().view(-1, source_dim, num_output_dims))\n",
    "                # The remaining part of the window comes from earlier rollout predictions:\n",
    "                len_backward = min(input_dim, prev_len)\n",
    "                for j in range(len_backward):\n",
    "                    idx_j = base_repr_2_int(prev_idx[:len(prev_idx)-len_backward+j+1], base = num_theories)\n",
    "                    current_input.append(pred_list_all[-len_backward + j][:, idx_j:idx_j+1])\n",
    "                current_input = torch.cat(current_input, 1)\n",
    "                current_input_flattened = current_input.view(-1, input_dim * num_output_dims)\n",
    "                if domain_net is not None:\n",
    "                    # Chain the domain probabilities along the theory sequence:\n",
    "                    new_valid_prob_ele = get_valid_prob(current_input_flattened, domain_net, num_theories = num_theories, domain_pred_mode = domain_pred_mode)\n",
    "                    valid_prob_ele = valid_prob[:, k:k+1] * new_valid_prob_ele\n",
    "                    valid_prob_list.append(valid_prob_ele)\n",
    "                new_pred = net_dict[\"pred_nets\"](current_input_flattened)\n",
    "                new_pred_list.append(new_pred)\n",
    "            new_pred_list = torch.cat(new_pred_list, 1)\n",
    "            if domain_net is not None:\n",
    "                valid_prob = torch.cat(valid_prob_list, 1)\n",
    "            pred_list_all.append(new_pred_list)\n",
    "        preds = pred_list_all[-1]\n",
    "    if \"autoencoder\" in net_dict and not disable_autoencoder:\n",
    "        preds = net_dict[\"autoencoder\"].decode(preds)\n",
    "    return preds, valid_prob\n",
    "\n",
    "\n",
    "def get_loss(\n",
    "    net_dict,\n",
    "    X,\n",
    "    y,\n",
    "    loss_types, \n",
    "    forward_steps = 1, \n",
    "    domain_net = None, \n",
    "    domain_pred_mode = \"onehot\", \n",
    "    loss_fun_dict = {},\n",
    "    replaced_loss_order = None, \n",
    "    is_Lagrangian = False,\n",
    "    is_mean = True,\n",
    "    ):\n",
    "    \"\"\"Evaluates the various loss metrics.\n",
    "\n",
    "    loss_types maps a loss_mode string to its setting dict (must contain \"amp\").\n",
    "    Modes starting with \"pred-based\" use loss_fun_dict[\"loss_fun_cumu\"]; mode\n",
    "    \"uncertainty-based\" uses loss_fun_dict[\"loss_with_uncertainty\"].\n",
    "    Returns (total_loss, {loss_mode: loss}).\n",
    "    \"\"\"\n",
    "    preds, valid_prob = get_preds_valid(net_dict,\n",
    "                                        X,\n",
    "                                        forward_steps = forward_steps,\n",
    "                                        domain_net = domain_net, \n",
    "                                        domain_pred_mode = domain_pred_mode,\n",
    "                                        is_Lagrangian = is_Lagrangian,\n",
    "                                       )\n",
    "    loss_dict = {}\n",
    "    for loss_mode, loss_setting in loss_types.items():\n",
    "        if loss_mode[:10] == \"pred-based\":\n",
    "            loss_fun_cumu = loss_fun_dict[\"loss_fun_cumu\"]\n",
    "            loss_mode_split = loss_mode.split(\"_\")\n",
    "            # Parse e.g. \"pred-based_generalized-mean_-1\" into (\"generalized-mean\", -1.0);\n",
    "            # fall back to the bare cumu-mode string when no numeric order is given.\n",
    "            # (Narrowed from a bare except so unrelated errors are not swallowed.)\n",
    "            try:\n",
    "                cumu_mode = (loss_mode_split[1], float(loss_mode_split[2]))\n",
    "            except (IndexError, ValueError):\n",
    "                cumu_mode = loss_mode_split[1]\n",
    "            # Optionally substitute a scheduled loss order (used for order annealing):\n",
    "            if replaced_loss_order is not None and cumu_mode[0] == \"generalized-mean\" and (\"decay_on\" in loss_setting and loss_setting[\"decay_on\"] is True):\n",
    "                cumu_mode = (cumu_mode[0], replaced_loss_order[loss_mode])\n",
    "            loss_dict[loss_mode] = loss_fun_cumu(preds, y, model_weights = valid_prob, cumu_mode = cumu_mode, is_mean = is_mean) * loss_setting[\"amp\"]\n",
    "        elif loss_mode == \"uncertainty-based\":\n",
    "            assert forward_steps == 1\n",
    "            loss_with_uncertainty = loss_fun_dict[\"loss_with_uncertainty\"]\n",
    "            loss_with_uncertainty.is_mean = is_mean\n",
    "            pred_with_uncertainty, info_list = get_pred_with_uncertainty(preds, net_dict[\"uncertainty_nets\"], X)\n",
    "            # Std of the precision-weighted prediction: (sum of precisions) ** -1/2.\n",
    "            std = info_list.sum(1) ** (-0.5)\n",
    "            loss_dict[loss_mode] = loss_with_uncertainty(pred_with_uncertainty, y, std = std) * loss_setting[\"amp\"]\n",
    "        else:\n",
    "            raise Exception(\"loss_mode {0} not recognized!\".format(loss_mode))\n",
    "    loss = 0\n",
    "    for loss_mode, loss_ele in loss_dict.items():\n",
    "        loss = loss + loss_ele\n",
    "    return loss, loss_dict\n",
    "\n",
    "\n",
    "def get_reg(net_dict, reg_dict, mode = \"L2\", is_cuda = False):   \n",
    "    reg_value_dict = {}\n",
    "    for net_target, reg_setting in reg_dict.items():\n",
    "        if net_target == \"pred_nets\":\n",
    "            for source_target, reg_amp in reg_setting.items():\n",
    "                reg_value_dict[\"pred_nets\"] = net_dict[\"pred_nets\"].get_regularization(source = [source_target], mode = mode) * reg_amp\n",
    "        elif net_target == \"domain_net\":\n",
    "            for source_target, reg_amp in reg_setting.items():\n",
    "                reg_value_dict[\"domain_net\"] = net_dict[\"domain_net\"].get_regularization(source = [source_target], mode = mode) * reg_amp\n",
    "        elif net_target == \"uncertainty_nets\" and \"uncertainty_nets\" in net_dict:                \n",
    "            for source_target, reg_amp in reg_setting.items():\n",
    "                reg_value_dict[\"uncertainty_nets\"] = net_dict[\"uncertainty_nets\"].get_regularization(source = [source_target], mode = mode) * reg_amp\n",
    "        else:\n",
    "            raise\n",
    "    reg = Variable(torch.FloatTensor([0]), requires_grad = False)\n",
    "    if is_cuda:\n",
    "        reg = reg.cuda()\n",
    "    for net_target, reg_value in reg_value_dict.items():\n",
    "        reg = reg + reg_value\n",
    "    return reg, reg_value_dict\n",
    "\n",
    "\n",
    "def combine_losses(loss_with_domain, loss_without_domain, loss_distribution_mode, isTorch = True, **kwargs):\n",
    "    if isinstance(loss_distribution_mode, np.ndarray):\n",
    "        k = kwargs[\"k\"]\n",
    "        loss = loss_with_domain * loss_distribution_mode[k] + loss_without_domain * (1 - loss_distribution_mode[k])\n",
    "    elif isinstance(loss_distribution_mode, tuple):\n",
    "        if loss_distribution_mode[0] == \"generalized-mean\":\n",
    "            order = float(loss_distribution_mode[1])\n",
    "            loss = ((loss_with_domain ** order + loss_without_domain ** order) / 2) ** (1 / order)\n",
    "        else:\n",
    "            raise\n",
    "    elif loss_distribution_mode == \"min\":\n",
    "        if isTorch:\n",
    "            loss = torch.min(loss_with_domain, loss_without_domain)\n",
    "        else:\n",
    "            loss = np.minimum(loss_with_domain, loss_without_domain)\n",
    "    elif loss_distribution_mode == \"harmonic\":\n",
    "        loss = 2 / (1 / loss_with_domain + 1 / loss_without_domain)\n",
    "    elif loss_distribution_mode == \"mean\":\n",
    "        loss = (loss_with_domain + loss_without_domain) / 2\n",
    "    else:\n",
    "        raise Exception(\"loss_distribution_mode {0} not recognized!\".format(loss_distribution_mode))\n",
    "    return loss\n",
    "\n",
    "\n",
    "def load_model_dict(model_dict):\n",
    "    \"\"\"Reconstruct a Theory_Training model from its serialized model_dict.\n",
    "\n",
    "    Fix: \"autoencoder\" is only serialized when the model has one (see the\n",
    "    model_dict property), so it is now looked up with a guard instead of\n",
    "    unconditionally (which raised KeyError for models without an autoencoder).\n",
    "    NOTE(review): this definition shadows the load_model_dict imported from\n",
    "    pytorch_net.net, so the recursive autoencoder call resolves to this function\n",
    "    rather than the library one -- confirm that is intended.\n",
    "    \"\"\"\n",
    "    if model_dict[\"type\"] == \"Theory_Training\":\n",
    "        model = Theory_Training(num_theories = model_dict[\"num_theories\"],\n",
    "                                proposed_theory_models = None,\n",
    "                                input_size = model_dict[\"input_size\"],\n",
    "                                struct_param_pred = model_dict[\"struct_param_pred\"],\n",
    "                                struct_param_domain = model_dict[\"struct_param_domain\"],\n",
    "                                struct_param_uncertainty = model_dict[\"struct_param_uncertainty\"],\n",
    "                                settings_pred = model_dict[\"settings_pred\"],\n",
    "                                settings_domain = model_dict[\"settings_domain\"],\n",
    "                                settings_uncertainty = model_dict[\"settings_uncertainty\"],\n",
    "                                autoencoder = load_model_dict(model_dict[\"autoencoder\"]) if \"autoencoder\" in model_dict else None,\n",
    "                                loss_types = model_dict[\"loss_types\"],\n",
    "                                loss_core = model_dict[\"loss_core\"],\n",
    "                                loss_order = model_dict[\"loss_order\"] if \"loss_order\" in model_dict else -1,\n",
    "                                is_Lagrangian = model_dict[\"is_Lagrangian\"] if \"is_Lagrangian\" in model_dict else False,\n",
    "                                neglect_threshold = model_dict[\"neglect_threshold\"] if \"neglect_threshold\" in model_dict else None,\n",
    "                                reg_multiplier = model_dict[\"reg_multiplier\"] if \"reg_multiplier\" in model_dict else None,\n",
    "                               )\n",
    "        model.pred_nets.load_model_dict(model_dict[\"pred_nets\"])\n",
    "        model.domain_net.load_model_dict(model_dict[\"domain_net\"])\n",
    "        model.domain_net_on = model_dict[\"domain_net_on\"]\n",
    "        if model_dict[\"struct_param_uncertainty\"] is not None:\n",
    "            model.uncertainty_nets.load_model_dict(model_dict[\"uncertainty_nets\"])\n",
    "    else:\n",
    "        raise Exception(\"type {0} not recognized!\".format(model_dict[\"type\"]))\n",
    "    return model\n",
    "\n",
    "\n",
    "class Theory_Training(nn.Module):\n",
    "    \"\"\"Implementing the core subroutine of the differentiable-divide-and-conquer (DDAC)\"\"\"\n",
    "    def __init__(\n",
    "        self,\n",
    "        num_theories,\n",
    "        proposed_theory_models,\n",
    "        input_size,\n",
    "        struct_param_pred,\n",
    "        struct_param_domain,\n",
    "        struct_param_uncertainty = None,\n",
    "        settings_pred = {},\n",
    "        settings_domain = {},\n",
    "        settings_uncertainty = {},\n",
    "        autoencoder = None,\n",
    "        loss_types = {},\n",
    "        loss_core = \"mse\",\n",
    "        loss_order = -1,\n",
    "        loss_balance_model_influence = False,\n",
    "        loss_precision_floor = PrecisionFloorLoss,\n",
    "        is_Lagrangian = False,\n",
    "        neglect_threshold = None,\n",
    "        reg_multiplier = None,\n",
    "        is_cuda = False,\n",
    "        ):\n",
    "        \"\"\"Build the theory ensemble (pred_nets), the domain classifier and the losses.\n",
    "\n",
    "        If proposed_theory_models is non-empty, the best proposals (by fraction_best)\n",
    "        seed pred_nets, padded with fresh MLPs up to num_theories; otherwise a fresh\n",
    "        Model_Ensemble is created. When struct_param_uncertainty is given, per-theory\n",
    "        uncertainty nets and the associated loss are built as well.\n",
    "        \"\"\"\n",
    "        super(Theory_Training, self).__init__()\n",
    "        self.num_theories = num_theories\n",
    "        self.input_size = input_size\n",
    "        self.loss_types = loss_types\n",
    "        self.loss_core = loss_core\n",
    "        self.loss_order = loss_order\n",
    "        self.loss_balance_model_influence = loss_balance_model_influence\n",
    "        self.loss_precision_floor = loss_precision_floor\n",
    "        self.is_Lagrangian = is_Lagrangian\n",
    "        self.neglect_threshold = neglect_threshold\n",
    "        self.reg_multiplier = deepcopy(reg_multiplier)\n",
    "        if self.reg_multiplier is not None:\n",
    "            self.reg_model_idx = -1\n",
    "            self.reg_domain_idx = -1\n",
    "        self.is_cuda = is_cuda\n",
    "        \n",
    "        if proposed_theory_models is not None and len(proposed_theory_models) > 0:\n",
    "            # If proposed_theory_models is not None, use the proposed models to construct pred_nets:\n",
    "            fraction_best_list = []\n",
    "            proposed_model_list = []\n",
    "            for name, theory_info in proposed_theory_models.items():\n",
    "                fraction_best_list.append(theory_info[\"fraction_best\"])\n",
    "                proposed_model_list.append(theory_info[\"theory_model\"])\n",
    "            fraction_best_list, proposed_model_list = sort_two_lists(fraction_best_list, proposed_model_list, reverse = True)\n",
    "            proposed_model_list = proposed_model_list[:self.num_theories]\n",
    "            # Pad with freshly-initialized MLPs up to num_theories:\n",
    "            for i in range(self.num_theories - len(proposed_model_list)):\n",
    "                net = MLP(input_size = self.input_size, struct_param = struct_param_pred, settings = settings_pred, is_cuda = self.is_cuda)\n",
    "                proposed_model_list.append(net)\n",
    "            self.pred_nets = construct_model_ensemble_from_nets(proposed_model_list)\n",
    "        else:\n",
    "            self.pred_nets = Model_Ensemble(num_models = self.num_theories, input_size = self.input_size if not self.is_Lagrangian else int(self.input_size / 2), \n",
    "                                          struct_param = struct_param_pred, settings = settings_pred, is_cuda = self.is_cuda)\n",
    "        \n",
    "        self.domain_net = MLP(input_size = self.input_size, struct_param = struct_param_domain, settings = settings_domain, is_cuda = self.is_cuda)\n",
    "        self.domain_net_on = False\n",
    "        self.loss_fun_cumu = Loss_Fun_Cumu(core = loss_core, cumu_mode = (\"generalized-mean\", loss_order), neglect_threshold = neglect_threshold, \n",
    "                                           balance_model_influence = loss_balance_model_influence, loss_precision_floor = self.loss_precision_floor,\n",
    "                                          )\n",
    "        self.net_dict = {}\n",
    "        self.net_dict[\"pred_nets\"] = self.pred_nets\n",
    "        self.net_dict[\"domain_net\"] = self.domain_net\n",
    "        if autoencoder is not None:\n",
    "            self.autoencoder = autoencoder\n",
    "            self.net_dict[\"autoencoder\"] = self.autoencoder\n",
    "        self.loss_fun_dict = {}\n",
    "        self.loss_fun_dict[\"loss_fun_cumu\"] = self.loss_fun_cumu\n",
    "        # Bug fix: test the constructor argument, not the struct_param_uncertainty\n",
    "        # property -- the property checks self.uncertainty_nets, which does not\n",
    "        # exist yet, so it always returned None here and the uncertainty nets\n",
    "        # were never built.\n",
    "        if struct_param_uncertainty is not None:\n",
    "            self.uncertainty_nets = Model_Ensemble(num_models = self.num_theories, input_size = self.input_size, \n",
    "                                                 struct_param = struct_param_uncertainty, settings = settings_uncertainty, is_cuda = self.is_cuda)\n",
    "            self.loss_with_uncertainty = Loss_with_uncertainty(core = self.loss_core)\n",
    "            self.net_dict[\"uncertainty_nets\"] = self.uncertainty_nets\n",
    "            self.loss_fun_dict[\"loss_with_uncertainty\"] = self.loss_with_uncertainty\n",
    "    \n",
    "    @property\n",
    "    def struct_param_pred(self):\n",
    "        \"\"\"Structural parameters of the prediction ensemble (delegates to pred_nets).\"\"\"\n",
    "        return self.pred_nets.struct_param\n",
    "    \n",
    "    @property\n",
    "    def struct_param_domain(self):\n",
    "        \"\"\"Structural parameters of the domain classifier (delegates to domain_net).\"\"\"\n",
    "        return self.domain_net.struct_param\n",
    "    \n",
    "    @property\n",
    "    def struct_param_uncertainty(self):\n",
    "        \"\"\"Structural parameters of the uncertainty nets, or None when they were not built.\"\"\"\n",
    "        if hasattr(self, \"uncertainty_nets\"):\n",
    "            return self.uncertainty_nets.struct_param\n",
    "        else:\n",
    "            return None\n",
    "    \n",
    "    @property\n",
    "    def settings_pred(self):\n",
    "        \"\"\"Settings dict of the prediction ensemble.\"\"\"\n",
    "        return self.pred_nets.settings\n",
    "    \n",
    "    @property\n",
    "    def settings_domain(self):\n",
    "        \"\"\"Settings dict of the domain classifier.\"\"\"\n",
    "        return self.domain_net.settings\n",
    "    \n",
    "    @property\n",
    "    def settings_uncertainty(self):\n",
    "        \"\"\"Settings dict of the uncertainty nets, or None when they were not built.\"\"\"\n",
    "        if hasattr(self, \"uncertainty_nets\"):\n",
    "            return self.uncertainty_nets.settings\n",
    "        else:\n",
    "            return None\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serialize the model to a plain dict (inverse of load_model_dict).\n",
    "\n",
    "        Optional parts (autoencoder, uncertainty_nets) are only included when present.\n",
    "        \"\"\"\n",
    "        model_dict = {\"type\": \"Theory_Training\"}\n",
    "        model_dict[\"num_theories\"] = self.num_theories\n",
    "        model_dict[\"input_size\"] = self.input_size\n",
    "        model_dict[\"struct_param_pred\"] = self.struct_param_pred\n",
    "        model_dict[\"struct_param_domain\"] = self.struct_param_domain\n",
    "        model_dict[\"struct_param_uncertainty\"] = self.struct_param_uncertainty\n",
    "        model_dict[\"settings_pred\"] = self.settings_pred\n",
    "        model_dict[\"settings_domain\"] = self.settings_domain\n",
    "        model_dict[\"settings_uncertainty\"] = self.settings_uncertainty\n",
    "        model_dict[\"loss_types\"] = self.loss_types\n",
    "        model_dict[\"loss_core\"] = self.loss_core\n",
    "        model_dict[\"loss_order\"] = self.loss_order\n",
    "        model_dict[\"is_Lagrangian\"] = self.is_Lagrangian\n",
    "        model_dict[\"neglect_threshold\"] = self.neglect_threshold\n",
    "        model_dict[\"reg_multiplier\"] = self.reg_multiplier\n",
    "        model_dict[\"pred_nets\"] = self.pred_nets.model_dict\n",
    "        model_dict[\"domain_net\"] = self.domain_net.model_dict\n",
    "        model_dict[\"domain_net_on\"] = self.domain_net_on\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            model_dict[\"autoencoder\"] = self.autoencoder.model_dict\n",
    "        if self.struct_param_uncertainty is not None:\n",
    "            model_dict[\"uncertainty_nets\"] = self.uncertainty_nets.model_dict\n",
    "        return model_dict\n",
    "\n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Rebuild a model from model_dict and adopt its state in place.\n",
    "\n",
    "        Implemented by constructing a fresh model and copying its __dict__ over;\n",
    "        NOTE(review): attributes absent from the new model are not removed -- confirm.\n",
    "        \"\"\"\n",
    "        new_model = load_model_dict(model_dict)\n",
    "        self.__dict__.update(new_model.__dict__)\n",
    "\n",
    "    @property\n",
    "    def DL(self):\n",
    "        \"\"\"Total description length: pred_nets DL plus domain_net DL.\"\"\"\n",
    "        return self.pred_nets.DL + self.domain_net.DL\n",
    "\n",
    "\n",
    "    def get_fraction_list(self, X, y = None, mode = \"best\"):\n",
    "        \"\"\"Return the fraction of examples assigned to each theory as a numpy array.\n",
    "\n",
    "        mode \"best\" assigns each example to the theory with the lowest individual\n",
    "        loss (requires y); mode \"domain\" uses the domain_net's argmax prediction.\n",
    "        \"\"\"\n",
    "        if mode == \"best\":\n",
    "            best_theory_idx = get_best_model_idx(self.net_dict, X, y, loss_fun_cumu = self.loss_fun_cumu, is_Lagrangian = self.is_Lagrangian)\n",
    "            fraction_list = to_one_hot(best_theory_idx, self.num_theories).sum(0).float() / len(best_theory_idx)\n",
    "            fraction_list = to_np_array(fraction_list.view(-1), full_reduce = False)\n",
    "        elif mode == \"domain\":\n",
    "            if hasattr(self, \"autoencoder\"):\n",
    "                X = self.autoencoder.encode(X)\n",
    "            valid_idx = self.domain_net(X).max(1)[1]\n",
    "            idx = to_one_hot(valid_idx, self.num_theories)\n",
    "            fraction_list = to_np_array(idx.sum(0), full_reduce = False) / float(len(X))\n",
    "        else:\n",
    "            # Previously a bare raise (uninformative RuntimeError outside an except\n",
    "            # block); raise an informative error consistent with the rest of the file.\n",
    "            raise Exception(\"mode {0} not recognized!\".format(mode))\n",
    "        return fraction_list\n",
    "    \n",
    "    \n",
    "    def get_data_based_on_model(self, model_id, X, y, mode = \"best\"):\n",
    "        \"\"\"Select the (X, y) examples assigned to theory model_id.\n",
    "\n",
    "        mode \"best\" assigns by lowest individual loss; mode \"domain\" by the\n",
    "        domain_net's argmax. Returns the filtered (X_chosen, y_chosen).\n",
    "        \"\"\"\n",
    "        if mode == \"best\":\n",
    "            domain_pred_idx = get_best_model_idx(self.net_dict, X, y, loss_fun_cumu = self.loss_fun_cumu, is_Lagrangian = self.is_Lagrangian)\n",
    "        elif mode == \"domain\":\n",
    "            domain_pred_idx = self.domain_net(X).max(1)[1]\n",
    "        else:\n",
    "            # Previously a bare raise (uninformative RuntimeError outside an except\n",
    "            # block); raise an informative error consistent with the rest of the file.\n",
    "            raise Exception(\"mode {0} not recognized!\".format(mode))\n",
    "        # Per-example mask, broadcast over feature dims by masked_select:\n",
    "        idx = (domain_pred_idx == model_id).unsqueeze(1)\n",
    "        X_chosen = torch.masked_select(X, idx).view(-1, *X.size()[1:])\n",
    "        y_chosen = torch.masked_select(y, idx).view(-1, *y.size()[1:])\n",
    "        return X_chosen, y_chosen\n",
    "\n",
    "\n",
    "    def get_loss_core(self):\n",
    "        \"\"\"Return a copy of the current core loss name (e.g. \"mse\" or a DL-based loss).\"\"\"\n",
    "        return deepcopy(self.loss_fun_cumu.loss_fun.core)\n",
    "\n",
    "\n",
    "    def set_loss_core(self, loss_core, loss_precision_floor = None):\n",
    "        \"\"\"Switch the core loss (and optionally the precision floor) everywhere.\n",
    "\n",
    "        The values are pushed into loss_fun_cumu and its inner loss_fun so all\n",
    "        copies stay in sync.\n",
    "        \"\"\"\n",
    "        self.loss_core = loss_core\n",
    "        self.loss_fun_cumu.loss_fun.core = loss_core\n",
    "        if loss_precision_floor is not None:\n",
    "            self.loss_precision_floor = loss_precision_floor\n",
    "            self.loss_fun_cumu.loss_precision_floor = loss_precision_floor\n",
    "            self.loss_fun_cumu.loss_fun.loss_precision_floor = loss_precision_floor\n",
    "\n",
    "\n",
    "    def remove_theories_based_on_data(self, X, y, threshold, criteria = [\"best\", \"domain\"]):\n",
    "        \"\"\"Prune theories whose data share falls below threshold under any criterion.\n",
    "\n",
    "        A theory is marked for removal if its fraction of examples (by \"best\" loss\n",
    "        or by \"domain\" assignment) is under threshold. At least one theory is\n",
    "        always kept. Returns the list of removed theory ids.\n",
    "        \"\"\"\n",
    "        to_prune_onehot = np.zeros(self.num_theories).astype(bool)\n",
    "        fraction_list_whole = []\n",
    "        for criteria_ele in criteria:\n",
    "            fraction_list = self.get_fraction_list(X, y, criteria_ele)\n",
    "            fraction_list_whole.append(fraction_list)\n",
    "            # A theory is pruned if it is below threshold under ANY criterion:\n",
    "            to_prune_onehot = to_prune_onehot | (fraction_list < threshold)\n",
    "        to_prune = to_prune_onehot.nonzero()[0].tolist()\n",
    "        print(\"fraction_best: {0}\".format(self.get_fraction_list(X, y, \"best\")))\n",
    "        print(\"fraction_domain: {0}\".format(self.get_fraction_list(X, y, \"domain\")))\n",
    "        if len(to_prune) > 0:\n",
    "            if len(to_prune) == self.num_theories:\n",
    "                print(\"Cannot remove all theories!\")\n",
    "                fraction_list_whole = np.array(fraction_list_whole)\n",
    "                to_prune = list(range(self.num_theories))\n",
    "                # Keep the theory with the largest mean fraction across criteria:\n",
    "                to_prune.remove(fraction_list_whole.mean(0).argmax())\n",
    "            self.remove_theories(to_prune)\n",
    "            print(\"theories {0} removed!\".format(to_prune))\n",
    "        return to_prune\n",
    "\n",
    "\n",
    "    def remove_theories(self, theory_ids):\n",
    "        \"\"\"Remove the given theories from pred_nets and the matching domain_net outputs.\"\"\"\n",
    "        self.pred_nets.remove_models(theory_ids)\n",
    "        # Drop the corresponding output neurons so the domain logits stay aligned:\n",
    "        self.domain_net.prune_neurons(layer_id = -1, neuron_ids = theory_ids)\n",
    "        assert self.pred_nets.num_models == self.domain_net.struct_param[-1][0], \\\n",
    "            \"pred_nets has {0} models, while domain_net has {1} output neurons!\".format(self.pred_nets.num_models, self.domain_net.struct_param[-1][0])\n",
    "        self.num_theories = self.pred_nets.num_models\n",
    "    \n",
    "    \n",
    "    def add_theories(\n",
    "        self,\n",
    "        X,\n",
    "        y,\n",
    "        validation_data = None,\n",
    "        criteria = (\"loss_with_domain\", 0),\n",
    "        loss_threshold = 1e-5,\n",
    "        fraction_threshold = 0.05,\n",
    "        isplot = True,\n",
    "        **kwargs\n",
    "        ):\n",
    "        if validation_data is None:\n",
    "            validation_data = (X, y)\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            X_lat = self.autoencoder.encode(X).detach()\n",
    "            autoencoder = self.autoencoder\n",
    "        else:\n",
    "            X_lat = X\n",
    "            autoencoder = None\n",
    "        X_test, y_test = validation_data\n",
    "        criterion = nn.MSELoss(reduce = False)\n",
    "        fraction_list = self.get_fraction_list(X_test, y_test)\n",
    "        valid_idx = self.domain_net(X_lat).max(1)[1]\n",
    "        idx = to_Boolean(to_one_hot(valid_idx, self.num_theories))\n",
    "        if len(X.shape) == 4:\n",
    "            idx = idx.unsqueeze(-1).unsqueeze(-1)\n",
    "        is_add = False\n",
    "        info = {}\n",
    "\n",
    "        for i in range(self.num_theories):\n",
    "            if fraction_list[i] > 0.3:\n",
    "                X_chosen = torch.masked_select(X, idx[:, i:i+1]).view(-1, *X.size()[1:])\n",
    "                X_chosen_lat = torch.masked_select(X_lat, idx[:, i:i+1].view(-1, 1)).view(-1, *X_lat.size()[1:])\n",
    "                y_chosen = torch.masked_select(y, idx[:, i:i+1]).view(-1, *y.size()[1:])\n",
    "                if len(X_chosen) == 0:\n",
    "                    continue\n",
    "                model = self.pred_nets.fetch_model(i)\n",
    "                pred = forward(model, X_chosen_lat, autoencoder = autoencoder, is_Lagrangian = self.is_Lagrangian)\n",
    "                loss = criterion(pred, y_chosen)\n",
    "                if len(loss.shape) == 4:\n",
    "                    loss = loss.mean(-1, keepdim = True).mean(-2, keepdim = True)\n",
    "                loss = loss.sum(1, keepdim = True)\n",
    "                idx_large = (loss > loss_threshold).detach()\n",
    "                large_fraction = to_np_array(idx_large.long().sum().float() / float(X.size(0)))\n",
    "                if large_fraction > fraction_threshold:\n",
    "                    info[i] = {}\n",
    "                    print(\"%\" * 40 + \"\\nThe large loss points for theory_{0} constitute a fraction of {1:.4f} of total points. Perform tentative splitting of theory_{2}.\\n\".format(i, large_fraction, self.num_theories + 1) + \"%\" * 40 + \"\\n\")\n",
    "\n",
    "                    all_losses_dict = self.get_losses(X_test, y_test)\n",
    "                    # Perform tentative adding and adaptation of new theory:\n",
    "                    new_model = deepcopy(model)\n",
    "                    X_large = torch.masked_select(X_chosen, idx_large).view(-1, *X_chosen.size()[1:])\n",
    "                    X_large_lat = torch.masked_select(X_chosen_lat, idx_large.view(-1, 1)).view(-1, *X_chosen_lat.size()[1:])\n",
    "                    y_large = torch.masked_select(y_chosen, idx_large).view(-1, *y_chosen.size()[1:])\n",
    "                    train_simple(new_model, X_large_lat, y_large, loss_type = self.get_loss_core(), loss_precision_floor = self.loss_precision_floor, autoencoder = autoencoder, is_Lagrangian = self.is_Lagrangian)\n",
    "\n",
    "                    U = deepcopy(self)\n",
    "                    U.pred_nets.add_models(new_model)\n",
    "                    U.domain_net.add_neurons(-1, 1, ((\"copy\", i), None))\n",
    "                    U.num_theories += 1\n",
    "                    U.re_init_optimizers()\n",
    "                    if U.domain_net_on and \"DL\" not in U.get_loss_core():\n",
    "                        if isplot:\n",
    "                            print(\"\\nPerform joint training of all models:\\n\")\n",
    "                        data_record = U.iterative_train_schedule(X, y, \n",
    "                                                  validation_data = validation_data,\n",
    "                                                  reg_dict = U.reg_dict,\n",
    "                                                  reg_mode = U.reg_mode,\n",
    "                                                  forward_steps = U.forward_steps,\n",
    "                                                  domain_pred_mode = U.domain_pred_mode,\n",
    "                                                  epochs = 10000, \n",
    "                                                  patience = 200,\n",
    "                                                  isplot = isplot, \n",
    "                                                  prefix = \"Tentative splitting of theory_{0}, train_model:\".format(i), \n",
    "                                                  num_phases = 2,\n",
    "                                                  add_theory_quota = 0,\n",
    "                                                  **kwargs\n",
    "                                                 )\n",
    "                        if isplot:\n",
    "                            print(\"%\" * 40 + \"\\nRefit domain to best model:\\n\")\n",
    "                        data_record_domain = U.fit_domain(X, y, \n",
    "                                                          validation_data = validation_data,\n",
    "                                                          reg_dict = U.reg_dict,\n",
    "                                                          reg_mode = U.reg_mode,\n",
    "                                                          forward_steps = U.forward_steps,\n",
    "                                                          domain_pred_mode = U.domain_pred_mode,\n",
    "                                                          epochs = 10000, \n",
    "                                                          patience = 200, \n",
    "                                                          isplot = isplot,\n",
    "                                                          prefix = \"Tentative splitting of theory_{0}, train_domain:\".format(i), \n",
    "                                                          **kwargs\n",
    "                                                         )\n",
    "                    else:\n",
    "                        if isplot:\n",
    "                            print(\"\\nPerform joint training of all models and domains:\\n\")\n",
    "                        data_record = U.iterative_train_schedule(X, y, \n",
    "                                                  validation_data = validation_data, \n",
    "                                                  reg_dict = U.reg_dict,\n",
    "                                                  reg_mode = U.reg_mode,\n",
    "                                                  forward_steps = U.forward_steps,\n",
    "                                                  domain_pred_mode = U.domain_pred_mode,\n",
    "                                                  epochs = 10000, \n",
    "                                                  patience = 200, \n",
    "                                                  domain_fit_setting = U.domain_fit_setting,\n",
    "                                                  isplot = isplot, \n",
    "                                                  prefix = \"Tentative splitting of theory_{0}, train_model and domain:\".format(i),\n",
    "                                                  num_phases = 2,\n",
    "                                                  add_theory_quota = 0,\n",
    "                                                  **kwargs\n",
    "                                                 )\n",
    "                    print(\"%\" * 40)\n",
    "                    U.set_loss_core(self.loss_core, self.loss_precision_floor)\n",
    "                    all_losses_dict_new = U.get_losses(X_test, y_test)\n",
    "\n",
    "                    # Passes:\n",
    "                    if all_losses_dict_new[criteria[0]] <= all_losses_dict[criteria[0]] + criteria[1]:\n",
    "                        print(\"The new {0} of {1} is smaller than the previous {2} + {3}, accept.\".format(criteria[0], all_losses_dict_new[criteria[0]],\n",
    "                                                                                                         all_losses_dict[criteria[0]], criteria[1]))\n",
    "                        info[i][\"fraction_best\"] = fraction_list[i]\n",
    "                        info[i][\"large_fraction\"] = large_fraction\n",
    "                        is_add = True\n",
    "                        self.set_net(\"pred_nets\", U.pred_nets)\n",
    "                        self.set_net(\"domain_net\", U.domain_net)\n",
    "                        info[i][\"data_record\"] = data_record\n",
    "                        if \"data_record_domain\" in locals():\n",
    "                            info[i][\"data_record_domain\"] = data_record_domain\n",
    "                        fraction_list = self.get_fraction_list(X_test, y_test)\n",
    "\n",
    "                        # Reinitialize the optimizers if already have one:\n",
    "                        self.re_init_optimizers()\n",
    "                    else:\n",
    "                        print(\"The new {0} of {1} is larger than the previous {2} + {3}, revert.\".format(criteria[0], all_losses_dict_new[criteria[0]],\n",
    "                                                                                                         all_losses_dict[criteria[0]], criteria[1]))\n",
    "\n",
    "        if not is_add:\n",
    "            print(\"Do not add new theories.\")\n",
    "        return is_add, info\n",
    "    \n",
    "    \n",
    "    def re_init_optimizers(self):\n",
    "        \"\"\"Re-create the optimizer(s) (and scheduler(s), if present) for the current parameters.\n",
    "\n",
    "        Needed after the set of trainable parameters changes (e.g. after adding a theory),\n",
    "        since the existing optimizer still references the stale parameter list. The current\n",
    "        learning rate of each optimizer is read back and preserved.\n",
    "\n",
    "        Raises:\n",
    "            NotImplementedError: if self.scheduler_settings[0] is not \"ReduceLROnPlateau\".\n",
    "        \"\"\"\n",
    "        def build_scheduler(optimizer):\n",
    "            # Rebuild a scheduler from self.scheduler_settings for the given optimizer.\n",
    "            if self.scheduler_settings[0] == \"ReduceLROnPlateau\":\n",
    "                scheduler_patience = self.scheduler_settings[1]\n",
    "                scheduler_factor = self.scheduler_settings[2]\n",
    "                return ReduceLROnPlateau(optimizer, factor = scheduler_factor, patience = scheduler_patience, verbose = True)\n",
    "            else:\n",
    "                # Previously a bare `raise`, which itself errors with \"No active exception to re-raise\":\n",
    "                raise NotImplementedError(\"scheduler type {0} is not supported\".format(self.scheduler_settings[0]))\n",
    "\n",
    "        if hasattr(self, \"optimizer\"):\n",
    "            lr = self.optimizer.param_groups[0][\"lr\"]\n",
    "            trainable_parameters = [parameter for parameter in self.pred_nets.parameters() if parameter.requires_grad]\n",
    "            self.optimizer = get_optimizer(optim_type = self.optim_type[0], lr = lr, parameters = trainable_parameters)\n",
    "            if hasattr(self, \"scheduler\"):\n",
    "                self.scheduler = build_scheduler(self.optimizer)\n",
    "        if hasattr(self, \"optimizer_domain\"):\n",
    "            lr_domain = self.optimizer_domain.param_groups[0][\"lr\"]\n",
    "            # Fall back to the model optimizer type if no dedicated domain optimizer type is set:\n",
    "            optim_domain_type = self.optim_domain_type[0] if hasattr(self, \"optim_domain_type\") else self.optim_type[0]\n",
    "            self.optimizer_domain = get_optimizer(optim_type = optim_domain_type, lr = lr_domain, parameters = self.domain_net.parameters())\n",
    "            if hasattr(self, \"scheduler_domain\"):\n",
    "                self.scheduler_domain = build_scheduler(self.optimizer_domain)\n",
    "    \n",
    "    \n",
    "    def pred_nets_forward(self, input):\n",
    "        \"\"\"Single-step prediction from all theory nets, without any domain gating.\"\"\"\n",
    "        preds, _ = get_preds_valid(self.net_dict, input, forward_steps = 1, is_Lagrangian = self.is_Lagrangian)\n",
    "        return preds\n",
    "    \n",
    "    \n",
    "    def domain_net_forward(self, input):\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            input = self.autoencoder.encode(input)\n",
    "        return self.domain_net(input)\n",
    "\n",
    "\n",
    "    def forward_one_step(self, input, domain_pred_mode = \"onehot\"):\n",
    "        \"\"\"One prediction step, combining per-theory predictions according to the domain net.\n",
    "\n",
    "        domain_pred_mode == \"onehot\": hard-select the prediction of the winning theory;\n",
    "        domain_pred_mode == \"prob\": softmax-weighted mixture of all theory predictions.\n",
    "        \"\"\"\n",
    "        theory_preds, _ = get_preds_valid(self.net_dict, input, forward_steps = 1, is_Lagrangian = self.is_Lagrangian, disable_autoencoder = True)\n",
    "        if domain_pred_mode == \"onehot\":\n",
    "            winner = self.domain_net(input).max(1)[1]\n",
    "            winner_onehot = to_one_hot(winner, self.num_theories)\n",
    "            mask = to_Boolean(winner_onehot).unsqueeze(-1)\n",
    "            return torch.masked_select(theory_preds, mask).view(-1, theory_preds.size(-1))\n",
    "        elif domain_pred_mode == \"prob\":\n",
    "            weights = nn.Softmax(dim = 1)(self.domain_net(input))\n",
    "            return (theory_preds * weights.unsqueeze(-1)).sum(1)\n",
    "        else:\n",
    "            raise Exception(\"domain_pred_mode {0} not recognized!\".format(domain_pred_mode))\n",
    "\n",
    "\n",
    "    def forward(self, input, forward_steps = 1, output_format = \"last\", domain_pred_mode = \"onehot\"):\n",
    "        # Encode:\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            output_lat = self.autoencoder.encode(input)\n",
    "        else:\n",
    "            output_lat = input\n",
    "        \n",
    "        # Make prediction:\n",
    "        if forward_steps == 1:\n",
    "            output_pred = self.forward_one_step(output_lat, domain_pred_mode = domain_pred_mode)\n",
    "            if hasattr(self, \"autoencoder\"):\n",
    "                output_pred = self.autoencoder.decode(output_pred)\n",
    "            return output_pred\n",
    "        dim = self.pred_nets.struct_param[0][-1][0]\n",
    "        pred_list = []\n",
    "        for i in range(forward_steps):\n",
    "            new_pred = self.forward_one_step(output_lat, domain_pred_mode = domain_pred_mode)\n",
    "            if output_format == \"all\":\n",
    "                pred_list.append(new_pred)\n",
    "            if i != forward_steps - 1:\n",
    "                output_lat = torch.cat([output_lat[:, dim:], new_pred], 1)\n",
    "        if output_format == \"last\":\n",
    "            output_lat = new_pred\n",
    "        elif output_format == \"all\":\n",
    "            output_lat = torch.cat(pred_list, 1)\n",
    "        else:\n",
    "            raise Exception(\"output_format {0} not recognized!\".format(output_format))\n",
    "\n",
    "        # Decode:\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            return self.autoencoder.decode(output_lat)\n",
    "        else:\n",
    "            return output_lat\n",
    "\n",
    "\n",
    "    def get_losses(self, X, y, mode = \"all\", forward_steps = 1, domain_pred_mode = \"onehot\", output_format = \"value\", is_mean = True, **kwargs):\n",
    "        \"\"\"Compute a dictionary of diagnostic losses/metrics on (X, y).\n",
    "\n",
    "        Args:\n",
    "            X, y: input and target data.\n",
    "            mode: a metric name, a list of metric names, or \"all\" (every supported metric).\n",
    "            forward_steps: number of autoregressive prediction steps.\n",
    "            domain_pred_mode: how the domain net combines theories; None falls back to self.domain_pred_mode.\n",
    "            output_format: \"value\" converts results to numpy via to_np_array; otherwise tensors are kept as-is.\n",
    "            is_mean: whether losses are averaged over examples.\n",
    "            **kwargs: options consumed by specific modes, e.g. \"reg_smooth\", \"DL_mode\",\n",
    "                \"big_domain_ids\"/\"true_domain_test\"/\"verbose\" (for \"metrics_big_domain\").\n",
    "\n",
    "        Returns:\n",
    "            dict mapping each requested metric name (plus auxiliary keys such as \"loss_dict\",\n",
    "            \"reg_dict\", \"smooth_norms\") to its value.\n",
    "        \"\"\"\n",
    "        if mode == \"all\":\n",
    "            mode = [\"loss\", \"loss_with_domain\", \"loss_without_domain\", \"mse_with_domain\", \"mse_without_domain\", \n",
    "                    \"loss_best\", \"loss_indi_theory\", \"loss_recons\", \"reg\", \"reg_smooth\", \"loss_domain\", \"reg_domain\", \"fraction_list_best\", \"fraction_list_domain\",\n",
    "                    \"DL_pred_nets\", \"DL_domain_net\", \"DL_data\", \"DL_data_absolute\", \"loss_precision_floor\", \"metrics_big_domain\"]\n",
    "        if not isinstance(mode, list):\n",
    "            mode = [mode]\n",
    "        all_losses_dict = {}\n",
    "        # Resolve the effective domain prediction mode:\n",
    "        if domain_pred_mode is None:\n",
    "            domain_pred_mode_used = self.domain_pred_mode\n",
    "        else:\n",
    "            domain_pred_mode_used = domain_pred_mode\n",
    "        # Work in latent space if an autoencoder is present:\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            X_lat = self.autoencoder.encode(X)\n",
    "        else:\n",
    "            X_lat = X\n",
    "        \n",
    "        # Dispatch on each requested metric:\n",
    "        for mode_ele in mode:\n",
    "            if mode_ele == \"loss\":\n",
    "                loss, loss_dict = get_loss(net_dict = self.net_dict, X = X, y = y, loss_types = self.loss_types, \n",
    "                                           forward_steps = forward_steps,\n",
    "                                           domain_net = self.domain_net if self.domain_net_on else None,\n",
    "                                           domain_pred_mode = domain_pred_mode_used,\n",
    "                                           loss_fun_dict = self.loss_fun_dict,\n",
    "                                           is_Lagrangian = self.is_Lagrangian,\n",
    "                                           is_mean = is_mean,\n",
    "                                          )\n",
    "                if output_format == \"value\":\n",
    "                    loss = to_np_array(loss)\n",
    "                    loss_dict = {key: to_np_array(value) for key, value in loss_dict.items()}\n",
    "                all_losses_dict[mode_ele] = loss\n",
    "                all_losses_dict[\"loss_dict\"] = loss_dict\n",
    "            elif mode_ele == \"loss_with_domain\":\n",
    "                loss, loss_dict = get_loss(net_dict = self.net_dict, X = X, y = y, loss_types = self.loss_types, \n",
    "                                           forward_steps = forward_steps,\n",
    "                                           domain_net = self.domain_net,\n",
    "                                           domain_pred_mode = domain_pred_mode_used,\n",
    "                                           loss_fun_dict = self.loss_fun_dict,\n",
    "                                           is_Lagrangian = self.is_Lagrangian,\n",
    "                                           is_mean = is_mean,\n",
    "                                          )\n",
    "                if output_format == \"value\":\n",
    "                    loss = to_np_array(loss)\n",
    "                all_losses_dict[mode_ele] = loss\n",
    "            elif mode_ele == \"loss_without_domain\":\n",
    "                loss, loss_dict = get_loss(net_dict = self.net_dict, X = X, y = y, loss_types = self.loss_types,\n",
    "                                           forward_steps = forward_steps,\n",
    "                                           domain_net = None,\n",
    "                                           domain_pred_mode = domain_pred_mode_used,\n",
    "                                           loss_fun_dict = self.loss_fun_dict,\n",
    "                                           is_Lagrangian = self.is_Lagrangian,\n",
    "                                           is_mean = is_mean,\n",
    "                                          )\n",
    "                if output_format == \"value\":\n",
    "                    loss = to_np_array(loss)\n",
    "                all_losses_dict[mode_ele] = loss\n",
    "            elif mode_ele == \"mse_with_domain\":\n",
    "                loss, loss_dict = get_loss(net_dict = self.net_dict, X = X, y = y, loss_types = {\"pred-based_mean\": {\"amp\": 1.}},\n",
    "                                           forward_steps = forward_steps,\n",
    "                                           domain_net = self.domain_net,\n",
    "                                           domain_pred_mode = domain_pred_mode_used,\n",
    "                                           loss_fun_dict = {\"loss_fun_cumu\": Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\", balance_model_influence = False, epsilon = 0)},\n",
    "                                           is_Lagrangian = self.is_Lagrangian,\n",
    "                                           is_mean = is_mean,\n",
    "                                          )\n",
    "                if output_format == \"value\":\n",
    "                    loss = to_np_array(loss)\n",
    "                all_losses_dict[mode_ele] = loss\n",
    "            elif mode_ele == \"mse_without_domain\":\n",
    "                loss, loss_dict = get_loss(net_dict = self.net_dict, X = X, y = y, loss_types = {\"pred-based_mean\": {\"amp\": 1.}},\n",
    "                                           forward_steps = forward_steps,\n",
    "                                           domain_net = None,\n",
    "                                           domain_pred_mode = domain_pred_mode_used,\n",
    "                                           loss_fun_dict = {\"loss_fun_cumu\": Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\")},\n",
    "                                           is_Lagrangian = self.is_Lagrangian,\n",
    "                                           is_mean = is_mean,\n",
    "                                          )\n",
    "                if output_format == \"value\":\n",
    "                    loss = to_np_array(loss)\n",
    "                all_losses_dict[mode_ele] = loss\n",
    "            elif mode_ele == \"loss_best\":\n",
    "                # Loss if every example were assigned to its best-fitting theory (lower bound):\n",
    "                loss_best, _ = get_loss(net_dict = self.net_dict, X = X, y = y, loss_types = {\"pred-based_min\": {\"amp\": 1.}},\n",
    "                                        forward_steps = forward_steps, \n",
    "                                        loss_fun_dict = self.loss_fun_dict,\n",
    "                                        is_Lagrangian = self.is_Lagrangian,\n",
    "                                        is_mean = is_mean,\n",
    "                                       )\n",
    "                if output_format == \"value\":\n",
    "                    loss_best = to_np_array(loss_best)\n",
    "                all_losses_dict[\"loss_best\"] = loss_best\n",
    "            elif mode_ele == \"loss_indi_theory\":\n",
    "                # Get individual theory loss:\n",
    "                loss_indi_theory = {}\n",
    "                preds, _ = get_preds_valid(self.net_dict, X_lat, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "                # NOTE(review): valid_onehot does not depend on i and could be hoisted out of this loop.\n",
    "                for i in range(self.num_theories):\n",
    "                    valid_onehot = to_one_hot(nn.Softmax(dim = 1)(self.domain_net(X_lat)).max(1)[1], self.num_theories).float()\n",
    "                    if self.is_cuda:\n",
    "                        valid_onehot = valid_onehot.cuda()\n",
    "                    loss_indi_theory_i = self.loss_fun_cumu(preds[:, i:i+1], y, sample_weights = valid_onehot[:,i:i+1], is_mean = is_mean)\n",
    "                    if output_format == \"value\":\n",
    "                        loss_indi_theory_i = to_np_array(loss_indi_theory_i)\n",
    "                    loss_indi_theory[i] = loss_indi_theory_i\n",
    "                all_losses_dict[\"loss_indi_theory\"] = loss_indi_theory\n",
    "            elif mode_ele == \"loss_recons\":\n",
    "                # Autoencoder reconstruction loss; silently skipped if there is no autoencoder.\n",
    "                if hasattr(self, \"autoencoder\"):\n",
    "                    loss_recons = self.autoencoder.get_loss(X, X, nn.MSELoss()) * self.optim_autoencoder_type[2]\n",
    "                    if output_format == \"value\":\n",
    "                        loss_recons = to_np_array(loss_recons)\n",
    "                    all_losses_dict[\"loss_recons\"] = loss_recons\n",
    "            elif mode_ele == \"reg\":\n",
    "                reg, reg_dict = get_reg(net_dict = self.net_dict, reg_dict = self.reg_dict if hasattr(self, \"reg_dict\") else {}, \n",
    "                                        mode = self.reg_mode if hasattr(self, \"reg_dict\") else None, is_cuda = self.is_cuda)\n",
    "                if output_format == \"value\":\n",
    "                    reg = to_np_array(reg)\n",
    "                    reg_dict = {key: to_np_array(value) for key, value in reg_dict.items()}\n",
    "                if hasattr(self, \"reg_multiplier_model\"):\n",
    "                    reg = reg * self.reg_multiplier_model\n",
    "                all_losses_dict[\"reg\"] = reg\n",
    "                all_losses_dict[\"reg_dict\"] = reg_dict\n",
    "            elif mode_ele == \"reg_smooth\":\n",
    "                reg_smooth_in = kwargs[\"reg_smooth\"] if \"reg_smooth\" in kwargs else None\n",
    "                if reg_smooth_in is not None:\n",
    "                    reg_smooth = reg_smooth_in\n",
    "                else:\n",
    "                    # Default hyperparameters; from the usage below: [0]=input noise scale,\n",
    "                    # [1],[2]=get_group_norm args, [3]=amplitude, [4]=num samples — TODO confirm semantics.\n",
    "                    reg_smooth = (0.05, 2, 10, 1e-6, 1)\n",
    "                num_samples = reg_smooth[4]\n",
    "                input_noise_scale = reg_smooth[0]\n",
    "                diff_list = []\n",
    "                # Estimate local smoothness by perturbing the input with Gaussian noise:\n",
    "                for _ in range(num_samples):\n",
    "                    input_perturb = Variable(torch.randn(*X.size()) * input_noise_scale)\n",
    "                    if self.is_cuda:\n",
    "                        input_perturb = input_perturb.cuda()\n",
    "                    diff = self.pred_nets_forward(X + input_perturb) - self.pred_nets_forward(X)\n",
    "                    diff_list.append(diff)\n",
    "                diff_list = torch.stack(diff_list, 2)\n",
    "                smooth_norms = get_group_norm(diff_list, reg_smooth[1], reg_smooth[2])\n",
    "                reg_smooth_amp = reg_smooth[3] if reg_smooth_in is not None else 0\n",
    "                # NOTE(review): reg_smooth_amp computed above is unused; the next line uses\n",
    "                # reg_smooth[3] directly, so the \"amp = 0 when not supplied\" intent never applies — confirm.\n",
    "                reg_smooth_value = smooth_norms.mean() * reg_smooth[3]\n",
    "                if hasattr(self, \"reg_multiplier_model\"):\n",
    "                    reg_smooth_value = reg_smooth_value * self.reg_multiplier_model\n",
    "                if output_format == \"value\":\n",
    "                    smooth_norms = to_np_array(smooth_norms)\n",
    "                    reg_smooth_value = to_np_array(reg_smooth_value)\n",
    "                all_losses_dict[\"smooth_norms\"] = smooth_norms\n",
    "                all_losses_dict[\"reg_smooth_value\"] = reg_smooth_value        \n",
    "#             elif mode_ele == \"reg_grad\":\n",
    "#                 reg_grad = kwargs[\"reg_grad\"] if \"reg_grad\" in kwargs else None\n",
    "#                 if reg_grad is not None:\n",
    "#                     X.requires_grad = True\n",
    "#                     loss_indi = self.loss_fun_cumu(self.pred_nets(X), y, cumu_mode = \"original\", is_mean = False).mean(0)\n",
    "#                     grad_norms = torch.cat([get_group_norm(grad(loss_indi[i], X, create_graph = True)[0], reg_grad[0], reg_grad[1]) for i in range(self.num_theories)])\n",
    "#                     X.requires_grad = False\n",
    "#                     reg_grad_value = grad_norms.mean() * reg_grad[2]\n",
    "#                     if hasattr(self, \"reg_multiplier_model\"):\n",
    "#                         reg_grad_value = reg_grad_value * self.reg_multiplier_model\n",
    "#                     if output_format == \"value\":\n",
    "#                         grad_norms = to_np_array(grad_norms)\n",
    "#                         reg_grad_value = to_np_array(reg_grad_value)\n",
    "#                     all_losses_dict[\"grad_norms\"] = grad_norms\n",
    "#                     all_losses_dict[\"reg_grad_value\"] = reg_grad_value\n",
    "            elif mode_ele == \"loss_domain\":\n",
    "                # Cross-entropy of the domain net against the per-example best theory:\n",
    "                best_theory_idx_test = get_best_model_idx(self.net_dict, X, y, loss_fun_cumu = self.loss_fun_cumu, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "                # NOTE(review): size_average is deprecated in newer PyTorch (use reduction=) — kept as-is.\n",
    "                loss_domain = nn.CrossEntropyLoss(size_average = is_mean)(self.domain_net(X_lat), best_theory_idx_test)\n",
    "                if output_format == \"value\":\n",
    "                    loss_domain = to_np_array(loss_domain)\n",
    "                all_losses_dict[\"loss_domain\"] = loss_domain\n",
    "            elif mode_ele == \"reg_domain\":\n",
    "                reg_domain, reg_domain_dict = get_reg(net_dict = self.net_dict, reg_dict = self.reg_domain_dict if hasattr(self, \"reg_domain_dict\") else {}, \n",
    "                                                      mode = self.reg_domain_mode if hasattr(self, \"reg_domain_dict\") else None, is_cuda = self.is_cuda)\n",
    "                if output_format == \"value\":\n",
    "                    reg_domain = to_np_array(reg_domain)\n",
    "                    reg_domain_dict = {key: to_np_array(value) for key, value in reg_domain_dict.items()}\n",
    "                if hasattr(self, \"reg_multiplier_domain\"):\n",
    "                    reg_domain = reg_domain * self.reg_multiplier_domain\n",
    "                all_losses_dict[\"reg_domain\"] = reg_domain\n",
    "                all_losses_dict[\"reg_domain_dict\"] = reg_domain_dict\n",
    "            elif mode_ele == \"fraction_list_best\":\n",
    "                all_losses_dict[\"fraction_list_best\"] = self.get_fraction_list(X, y, mode = \"best\")\n",
    "            elif mode_ele == \"fraction_list_domain\":\n",
    "                all_losses_dict[\"fraction_list_domain\"] = self.get_fraction_list(X, mode = \"domain\")\n",
    "            elif mode_ele == \"DL_pred_nets\":\n",
    "                all_losses_dict[\"DL_pred_nets\"] = self.pred_nets.DL\n",
    "            elif mode_ele == \"DL_domain_net\":\n",
    "                all_losses_dict[\"DL_domain_net\"] = self.domain_net.DL\n",
    "            elif mode_ele == \"DL_data\":\n",
    "                # Description length of the data under the model's own precision floor:\n",
    "                pred = self(X)\n",
    "                DL_mode = kwargs[\"DL_mode\"] if \"DL_mode\" in kwargs else \"DLs\"\n",
    "                DL_criterion = Loss_Fun(core = DL_mode, loss_precision_floor = self.loss_precision_floor, DL_sum = True)\n",
    "                DL_data = DL_criterion(pred, y)\n",
    "                all_losses_dict[\"DL_data\"] = to_np_array(DL_data)\n",
    "            elif mode_ele == \"DL_data_absolute\":\n",
    "                # Same as \"DL_data\" but with the global PrecisionFloorLoss (comparable across models):\n",
    "                pred = self(X)\n",
    "                DL_mode = kwargs[\"DL_mode\"] if \"DL_mode\" in kwargs else \"DLs\"\n",
    "                DL_criterion = Loss_Fun(core = DL_mode, loss_precision_floor = PrecisionFloorLoss, DL_sum = True)\n",
    "                DL_data = DL_criterion(pred, y)\n",
    "                all_losses_dict[\"DL_data_absolute\"] = to_np_array(DL_data)\n",
    "            elif mode_ele == \"loss_precision_floor\":\n",
    "                all_losses_dict[\"loss_precision_floor\"] = deepcopy(self.loss_precision_floor)\n",
    "            elif mode_ele == \"metrics_big_domain\":\n",
    "                # Domain-classification metrics restricted to the \"big\" ground-truth domains;\n",
    "                # requires both \"big_domain_ids\" and \"true_domain_test\" in kwargs, otherwise skipped.\n",
    "                if \"big_domain_ids\" in kwargs and \"true_domain_test\" in kwargs and kwargs[\"big_domain_ids\"] is not None and kwargs[\"true_domain_test\"] is not None:\n",
    "                    predicted_domain = self.domain_net(X_lat).max(1)[1]\n",
    "                    (union, predicted_big_domains, true_big_domains, intersection), _ = count_metrics_pytorch(predicted_domain, true_domain = kwargs[\"true_domain_test\"], big_domain_ids = kwargs[\"big_domain_ids\"], verbose = False)\n",
    "\n",
    "                    # Restrict to examples whose true domain is one of the big domains:\n",
    "                    true_domain_np = to_np_array(kwargs[\"true_domain_test\"]).flatten()\n",
    "                    idx_in_big = torch.LongTensor(np.array([i for i in range(len(true_domain_np)) if int(true_domain_np[i]) in kwargs[\"big_domain_ids\"]]))\n",
    "                    predicted_domain_in_big = predicted_domain[idx_in_big]\n",
    "                    true_domain_in_big = kwargs[\"true_domain_test\"][idx_in_big]\n",
    "                    (union_in_big, predicted_big_domains_in_big, true_big_domains_in_big, intersection_in_big), _ = count_metrics_pytorch(predicted_domain_in_big, true_domain = true_domain_in_big, big_domain_ids = kwargs[\"big_domain_ids\"], verbose = True if \"verbose\" in kwargs and kwargs[\"verbose\"] is True else False)\n",
    "                    \n",
    "                    assert union_in_big == true_big_domains_in_big, \"For in_big, the three quantities must be equal!\"\n",
    "                    if true_big_domains is not None:\n",
    "                        assert union_in_big == true_big_domains, \"For in_big, the three quantities must be equal!\"\n",
    "\n",
    "                    metrics = {\"union\": union,\n",
    "                               \"predicted_big_domains\": predicted_big_domains,\n",
    "                               \"true_big_domains\": union_in_big,\n",
    "                               \"intersection\": intersection,\n",
    "                               \"intersection_in_big\": intersection_in_big,\n",
    "                              }\n",
    "                    # Build a boolean mask selecting examples belonging to any big domain:\n",
    "                    is_big_domain = to_Boolean(torch.zeros(X.size(0), 1))\n",
    "                    if X.is_cuda:\n",
    "                        is_big_domain = is_big_domain.cuda()\n",
    "                    true_domain_test = kwargs[\"true_domain_test\"]\n",
    "                    for big_domain_id in kwargs[\"big_domain_ids\"]:\n",
    "                        is_big_domain = is_big_domain | to_Boolean(true_domain_test == big_domain_id)\n",
    "                    if len(X.shape) == 4:\n",
    "                        is_big_domain = is_big_domain.unsqueeze(-1).unsqueeze(-1)\n",
    "                    is_big_domain = to_Variable(is_big_domain, is_cuda = self.is_cuda)\n",
    "                    X_big_domain = torch.masked_select(X, is_big_domain).view(-1, *X.size()[1:])\n",
    "                    y_big_domain = torch.masked_select(y, is_big_domain).view(-1, *y.size()[1:])\n",
    "                    X_lat_big_domain = torch.masked_select(X_lat, is_big_domain.view(-1, 1)).view(-1, *X_lat.size()[1:])\n",
    "                    \n",
    "                    # Loss:\n",
    "                    loss_big_domain, loss_dict_big_domain = get_loss(net_dict = self.net_dict, X = X_big_domain, y = y_big_domain, loss_types = self.loss_types, \n",
    "                                                                       forward_steps = forward_steps,\n",
    "                                                                       domain_net = self.domain_net if self.domain_net_on else None,\n",
    "                                                                       domain_pred_mode = domain_pred_mode_used,\n",
    "                                                                       loss_fun_dict = self.loss_fun_dict,\n",
    "                                                                       is_Lagrangian = self.is_Lagrangian,\n",
    "                                                                       is_mean = is_mean,\n",
    "                                                                      )\n",
    "                    if output_format == \"value\":\n",
    "                        loss_big_domain = to_np_array(loss_big_domain)\n",
    "                        loss_dict_big_domain = {key: to_np_array(value) for key, value in loss_dict_big_domain.items()}\n",
    "                    metrics[\"loss_big_domain\"] = loss_big_domain\n",
    "                    metrics[\"loss_dict_big_domain\"] = loss_dict_big_domain\n",
    "                    \n",
    "                    # loss_with_domain:\n",
    "                    # NOTE(review): this call and the mse_with_domain call below are identical\n",
    "                    # (both use an mse Loss_Fun_Cumu) — confirm whether loss_with_domain was meant\n",
    "                    # to use self.loss_fun_dict instead.\n",
    "                    loss_big_domain, _ = get_loss(net_dict = self.net_dict, X = X_big_domain, y = y_big_domain, loss_types = {\"pred-based_mean\": {\"amp\": 1.}},\n",
    "                                                   forward_steps = forward_steps,\n",
    "                                                   domain_net = self.domain_net,\n",
    "                                                   domain_pred_mode = domain_pred_mode_used,\n",
    "                                                   loss_fun_dict = {\"loss_fun_cumu\": Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\", balance_model_influence = False, epsilon = 0)},\n",
    "                                                   is_Lagrangian = self.is_Lagrangian,\n",
    "                                                   is_mean = is_mean,\n",
    "                                                  )\n",
    "                    if output_format == \"value\":\n",
    "                        loss_big_domain = to_np_array(loss_big_domain)\n",
    "                    metrics[\"loss_with_domain_big_domain\"] = loss_big_domain\n",
    "                    \n",
    "                    # mse_with_domain:\n",
    "                    loss_big_domain, _ = get_loss(net_dict = self.net_dict, X = X_big_domain, y = y_big_domain, loss_types = {\"pred-based_mean\": {\"amp\": 1.}},\n",
    "                                                   forward_steps = forward_steps,\n",
    "                                                   domain_net = self.domain_net,\n",
    "                                                   domain_pred_mode = domain_pred_mode_used,\n",
    "                                                   loss_fun_dict = {\"loss_fun_cumu\": Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\", balance_model_influence = False, epsilon = 0)},\n",
    "                                                   is_Lagrangian = self.is_Lagrangian,\n",
    "                                                   is_mean = is_mean,\n",
    "                                                  )\n",
    "                    if output_format == \"value\":\n",
    "                        loss_big_domain = to_np_array(loss_big_domain)\n",
    "                    metrics[\"mse_with_domain_big_domain\"] = loss_big_domain\n",
    "                    \n",
    "                    # mse_indi_theory_big_domain:\n",
    "                    mse_indi_theory_big_domain = {}\n",
    "                    preds_big_domain, _ = get_preds_valid(self.net_dict, X_lat_big_domain, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "                    for i in range(self.num_theories):\n",
    "                        valid_onehot_big_domain = to_one_hot(nn.Softmax(dim = 1)(self.domain_net(X_lat_big_domain)).max(1)[1], self.num_theories).float()\n",
    "                        if self.is_cuda:\n",
    "                            valid_onehot_big_domain = valid_onehot_big_domain.cuda()\n",
    "                        loss_fun_cumu = Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\", balance_model_influence = False, epsilon = 0)\n",
    "                        mse_indi_theory_i_big_domain = loss_fun_cumu(preds_big_domain[:, i:i+1], y_big_domain, sample_weights = valid_onehot_big_domain[:,i:i+1], is_mean = is_mean)\n",
    "                        if output_format == \"value\":\n",
    "                            mse_indi_theory_i_big_domain = to_np_array(mse_indi_theory_i_big_domain)\n",
    "                        mse_indi_theory_big_domain[i] = mse_indi_theory_i_big_domain\n",
    "                    all_losses_dict[\"mse_indi_theory_big_domain\"] = mse_indi_theory_big_domain\n",
    "                    all_losses_dict[\"metrics_big_domain\"] = metrics\n",
    "            else:\n",
    "                raise Exception(\"mode {0} not recognized!\".format(mode_ele))\n",
    "        return all_losses_dict\n",
    "\n",
    "\n",
    "    def get_adaptive_precision_floor(self, X, y, range = (1e-6, 1e-1), nonzero_ratio = 0.5):\n",
    "        \"\"\"Find a DL precision floor such that fewer than nonzero_ratio of the examples\n",
    "        have a (near-)zero loss, scanning candidate floors from largest to smallest.\n",
    "\n",
    "        :param X: input examples (used to evaluate per-example losses).\n",
    "        :param y: targets corresponding to X.\n",
    "        :param range: (min, max) bounds of the candidate precision floors, scanned\n",
    "            log-uniformly from max down to min. (The name shadows the builtin\n",
    "            ``range``; kept as-is since callers pass it by keyword.)\n",
    "        :param nonzero_ratio: stop as soon as the count of near-zero losses drops\n",
    "            below len(X) * nonzero_ratio.\n",
    "        :return: the chosen precision floor dl (the last candidate tried).\n",
    "        \"\"\"\n",
    "        # Work on a deep copy so setting the loss core does not mutate self:\n",
    "        U = deepcopy(self)\n",
    "        # Scan 200 log-spaced candidate floors, from the largest down to the smallest:\n",
    "        for dl in np.logspace(np.log10(range[1]), np.log10(range[0]), 200):\n",
    "            U.set_loss_core(\"DL\", dl)\n",
    "            # Count examples whose per-example loss is effectively zero (< 2^-32):\n",
    "            num_zero_loss = (U.get_losses(X, y, mode = \"loss_with_domain\", is_mean = False)[\"loss_with_domain\"] < 2 ** (-32)).sum()\n",
    "            if num_zero_loss < len(X) * nonzero_ratio:\n",
    "                break\n",
    "        return dl\n",
    "\n",
    "\n",
    "    def iterative_train_schedule(\n",
    "        self,\n",
    "        X_train,\n",
    "        y_train,\n",
    "        validation_data = None,\n",
    "        optim_type = (\"adam\", 5e-3),\n",
    "        reg_dict = {},\n",
    "        reg_mode = \"L1\",\n",
    "        domain_fit_setting = None,\n",
    "        forward_steps = 1,\n",
    "        domain_pred_mode = \"onehot\",\n",
    "        grad_clipping = None,\n",
    "        scheduler_settings = (\"ReduceLROnPlateau\", 30, 0.1),\n",
    "        loss_order_decay = None,\n",
    "        gradient_noise = None,\n",
    "        epochs = None,\n",
    "        batch_size = None,\n",
    "        inspect_interval = 1000,\n",
    "        patience = None,\n",
    "        change_interval = 1,\n",
    "        record_interval = None,\n",
    "        isplot = True,\n",
    "        filename = None,\n",
    "        view_init = (10, 190),\n",
    "        raise_nan = True,\n",
    "        add_theory_quota = 1,\n",
    "        add_theory_criteria = (\"mse_with_domain\", 0),\n",
    "        add_theory_loss_threshold = None,\n",
    "        theory_remove_fraction_threshold = None,\n",
    "        loss_floor = 1e-12,\n",
    "        prefix = None,\n",
    "        num_phases = 3,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Implements steps 2 to 6 in Alg. 2 in Wu and Tegmark (2019).\n",
    "\n",
    "        With an 'mse' loss core, this delegates directly to iterative_train().\n",
    "        With a 'DL' (description-length) loss core, it runs up to num_phases\n",
    "        phases; each phase (optionally) re-estimates the DL precision floor,\n",
    "        trains a deep copy of self, and accepts the trained copy into self only\n",
    "        if loss_with_domain improved on the validation data.\n",
    "\n",
    "        :return: the data_record from iterative_train() ('mse' core), or the\n",
    "            list of accepted per-phase data_records ('DL' core).\n",
    "        \"\"\"\n",
    "        # 'mse' core needs no precision-floor scheduling; hand off directly:\n",
    "        if self.get_loss_core() == \"mse\":\n",
    "            return self.iterative_train(\n",
    "                                X_train = X_train,\n",
    "                                y_train = y_train,\n",
    "                                validation_data = validation_data,\n",
    "                                optim_type = optim_type,\n",
    "                                reg_dict = reg_dict,\n",
    "                                reg_mode = reg_mode,\n",
    "                                domain_fit_setting = domain_fit_setting,\n",
    "                                forward_steps = forward_steps,\n",
    "                                domain_pred_mode = domain_pred_mode,\n",
    "                                grad_clipping = grad_clipping,\n",
    "                                scheduler_settings = scheduler_settings,\n",
    "                                loss_order_decay = loss_order_decay,\n",
    "                                gradient_noise = gradient_noise,\n",
    "                                epochs = epochs,\n",
    "                                batch_size = batch_size,\n",
    "                                inspect_interval = inspect_interval,\n",
    "                                patience = patience,\n",
    "                                change_interval = change_interval,\n",
    "                                record_interval = record_interval,\n",
    "                                isplot = isplot,\n",
    "                                filename = filename,\n",
    "                                view_init = view_init,\n",
    "                                raise_nan = raise_nan,\n",
    "                                add_theory_quota = add_theory_quota,\n",
    "                                add_theory_criteria = add_theory_criteria,\n",
    "                                add_theory_loss_threshold = add_theory_loss_threshold,\n",
    "                                loss_floor = loss_floor,\n",
    "                                prefix = prefix,\n",
    "                                **kwargs\n",
    "                                )\n",
    "        elif \"DL\" in self.get_loss_core():\n",
    "            if validation_data is None:\n",
    "                validation_data = (X_train, y_train)\n",
    "            X_test, y_test = validation_data\n",
    "            data_record_whole = []\n",
    "            for ii in range(num_phases):\n",
    "                # Each phase trains a deep copy; self is only updated on acceptance below.\n",
    "                U = deepcopy(self)\n",
    "                if \"fix_adaptive_precision_floor\" in kwargs and kwargs[\"fix_adaptive_precision_floor\"] is True:\n",
    "                    # Keep whatever precision floor the loss function currently carries:\n",
    "                    dl = U.loss_fun_cumu.loss_fun.loss_precision_floor\n",
    "                else:\n",
    "                    # Re-estimate the DL precision floor from the validation data:\n",
    "                    dl = U.get_adaptive_precision_floor(X_test, y_test, range = (1e-6, 1e-1), nonzero_ratio = 0.5)\n",
    "                    U.set_loss_core(U.loss_core, dl)\n",
    "                print(\"## Phase {0}:\\tcurrent DL precision_floor: {1:.9f}\".format(ii, dl))\n",
    "                # Tentative fitting:\n",
    "                # Snapshot losses before training so the phase can be accepted/reverted:\n",
    "                loss_dict = U.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "                data_record = U.iterative_train(\n",
    "                                X_train = X_train,\n",
    "                                y_train = y_train,\n",
    "                                validation_data = validation_data,\n",
    "                                optim_type = optim_type,\n",
    "                                reg_dict = reg_dict,\n",
    "                                reg_mode = reg_mode,\n",
    "                                domain_fit_setting = domain_fit_setting,\n",
    "                                forward_steps = forward_steps,\n",
    "                                domain_pred_mode = domain_pred_mode,\n",
    "                                grad_clipping = grad_clipping,\n",
    "                                scheduler_settings = scheduler_settings,\n",
    "                                loss_order_decay = loss_order_decay,\n",
    "                                gradient_noise = gradient_noise,\n",
    "                                epochs = epochs,\n",
    "                                batch_size = batch_size,\n",
    "                                inspect_interval = inspect_interval,\n",
    "                                patience = patience,\n",
    "                                change_interval = change_interval,\n",
    "                                record_interval = record_interval,\n",
    "                                isplot = isplot,\n",
    "                                filename = filename,\n",
    "                                view_init = view_init,\n",
    "                                raise_nan = raise_nan,\n",
    "                                add_theory_quota = add_theory_quota,\n",
    "                                add_theory_criteria = add_theory_criteria,\n",
    "                                add_theory_loss_threshold = add_theory_loss_threshold,\n",
    "                                loss_floor = loss_floor,\n",
    "                                prefix = prefix,\n",
    "                                **kwargs\n",
    "                                )\n",
    "                data_record[\"loss_precision_floor\"] = dl\n",
    "                loss_dict_new = U.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "                if theory_remove_fraction_threshold is not None:\n",
    "                    # Prune theories whose domains cover too small a fraction of the data:\n",
    "                    data_record[\"removed_theories\"] = U.remove_theories_based_on_data(X_test, y_test, threshold = theory_remove_fraction_threshold)\n",
    "                # Accept the phase only if the validation loss_with_domain improved:\n",
    "                if loss_dict_new[\"loss_with_domain\"] >= loss_dict[\"loss_with_domain\"]:\n",
    "                    print(\"The loss_with_domain {0} is larger than previous {1}. Revert and abort fitting.\".format(loss_dict_new[\"loss_with_domain\"], loss_dict[\"loss_with_domain\"]))\n",
    "                    break\n",
    "                else:\n",
    "                    print(\"The loss_with_domain {0} decrease from previous {1}. Accept and continue training.\".format(loss_dict_new[\"loss_with_domain\"], loss_dict[\"loss_with_domain\"]))\n",
    "                    data_record_whole.append(data_record)\n",
    "                    # Adopt the trained copy's state into self:\n",
    "                    self.__dict__.update(U.__dict__)\n",
    "                if loss_dict_new[\"mse_with_domain\"] < loss_floor:\n",
    "                    print(\"mse_with_domain = {0} is below the floor level {1}, stop.\".format(loss_dict_new[\"mse_with_domain\"], loss_floor))\n",
    "                    break\n",
    "            return data_record_whole\n",
    "        else:\n",
    "            # A bare `raise` here would yield \"RuntimeError: No active exception to re-raise\";\n",
    "            # raise an informative error instead, matching the style used elsewhere in this class:\n",
    "            raise Exception(\"loss_core {0} not recognized!\".format(self.get_loss_core()))\n",
    "    \n",
    "\n",
    "    def iterative_train(\n",
    "        self,\n",
    "        X_train,\n",
    "        y_train,\n",
    "        validation_data = None,\n",
    "        optim_type = (\"adam\", 5e-3),\n",
    "        reg_dict = {},\n",
    "        reg_mode = \"L1\",\n",
    "        domain_fit_setting = None,\n",
    "        forward_steps = 1,\n",
    "        domain_pred_mode = \"onehot\",\n",
    "        grad_clipping = None,\n",
    "        scheduler_settings = (\"ReduceLROnPlateau\", 30, 0.1),\n",
    "        loss_order_decay = None,\n",
    "        gradient_noise = None,\n",
    "        epochs = None,\n",
    "        inspect_interval = 1000,\n",
    "        patience = None,\n",
    "        change_interval = 1,\n",
    "        record_interval = None,\n",
    "        isplot = True,\n",
    "        filename = None,\n",
    "        view_init = (10, 190),\n",
    "        raise_nan = True,\n",
    "        add_theory_quota = 1,\n",
    "        add_theory_criteria = (\"mse_with_domain\", 0),\n",
    "        add_theory_loss_threshold = None,\n",
    "        loss_floor = 1e-12,\n",
    "        prefix = None,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Implements the IterativeTrain algorithm in Alg. 2 in Wu and Tegmark (2019)\"\"\"\n",
    "        X_test, y_test = validation_data\n",
    "        self.optim_type = optim_type\n",
    "        self.reg_dict = reg_dict\n",
    "        self.reg_mode = reg_mode\n",
    "        self.forward_steps = forward_steps\n",
    "        self.domain_pred_mode = domain_pred_mode\n",
    "        self.grad_clipping = grad_clipping\n",
    "        self.scheduler_settings = scheduler_settings\n",
    "        self.loss_order_decay = loss_order_decay\n",
    "        self.gradient_noise = gradient_noise\n",
    "        self.domain_fit_setting = domain_fit_setting\n",
    "        print(\"scheduler_settings:\", self.scheduler_settings)\n",
    "        print(\"grad_clipping:\", self.grad_clipping)\n",
    "        print(\"forward_steps:\", forward_steps)\n",
    "        print(\"loss_order_decay:\", loss_order_decay)\n",
    "        print(\"gradient_noise:\", gradient_noise)\n",
    "        print(\"domain_fit_setting:\", domain_fit_setting)\n",
    "        print(\"loss_types: \", self.loss_types)\n",
    "        self.optim_autoencoder_type = kwargs[\"optim_autoencoder_type\"] if \"optim_autoencoder_type\" in kwargs else None\n",
    "        print(\"optim_autoencoder_type: \", self.optim_autoencoder_type)\n",
    "        batch_size = kwargs[\"batch_size\"] if \"batch_size\" in kwargs else None\n",
    "        print(\"batch_size:\", batch_size)\n",
    "        record_mode = kwargs[\"record_mode\"] if \"record_mode\" in kwargs else 1\n",
    "        print(\"record_mode: {0}\".format(record_mode))\n",
    "        reg_smooth = kwargs[\"reg_smooth\"] if \"reg_smooth\" in kwargs else None\n",
    "        print(\"reg_smooth: {0}\".format(reg_smooth))\n",
    "#         reg_grad = kwargs[\"reg_grad\"] if \"reg_grad\" in kwargs else None\n",
    "#         print(\"reg_grad: {0}\".format(reg_grad))\n",
    "        if \"big_domain_ids\" in kwargs:\n",
    "            print(\"big_domain_ids: \", kwargs[\"big_domain_ids\"])\n",
    "        add_theory_limit = kwargs[\"add_theory_limit\"] if \"add_theory_limit\" in kwargs else None\n",
    "        print(\"add_theory_limit: {0}\".format(add_theory_limit))\n",
    "        print()\n",
    "\n",
    "\n",
    "        if validation_data is None:\n",
    "            validation_data = (X_train, y_train)\n",
    "\n",
    "        # Setting up optimizer:\n",
    "        if not hasattr(self, \"optimizer\"):\n",
    "            trainable_parameters = [parameter for parameter in self.pred_nets.parameters() if parameter.requires_grad]\n",
    "            self.optimizer = get_optimizer(optim_type = self.optim_type[0], lr = self.optim_type[1], parameters = trainable_parameters)\n",
    "        else:\n",
    "            new_lr = np.sqrt(self.optimizer.param_groups[0][\"lr\"] * self.optim_type[1])\n",
    "            for param_group in self.optimizer.param_groups:\n",
    "                param_group[\"lr\"] = new_lr\n",
    "        if domain_fit_setting is not None:\n",
    "            if not hasattr(self, \"optimizer_domain\"):\n",
    "                self.optimizer_domain = get_optimizer(optim_type = domain_fit_setting[\"optim_domain_type\"][0], lr = domain_fit_setting[\"optim_domain_type\"][1], parameters = self.domain_net.parameters())\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            self.optimizer_autoencoder = get_optimizer(optim_type = self.optim_autoencoder_type[0], lr = self.optim_autoencoder_type[1], parameters = self.autoencoder.parameters())\n",
    "                \n",
    "        if batch_size is None:\n",
    "            if record_interval is None:\n",
    "                record_interval = 10\n",
    "            if self.optim_type[0] == \"LBFGS\":\n",
    "                num_iter = 5000\n",
    "            else:\n",
    "                num_iter = 15000\n",
    "        else:\n",
    "            if record_interval is None:\n",
    "                record_interval = 1\n",
    "            if self.optim_type[0] == \"LBFGS\":\n",
    "                num_iter = 250\n",
    "            else:\n",
    "                num_iter = 1000\n",
    "        assert inspect_interval % record_interval == 0\n",
    "        if epochs is not None:\n",
    "            num_iter = epochs\n",
    "        if batch_size is not None:\n",
    "            dataset_train = data_utils.TensorDataset(X_train.data, y_train.data)\n",
    "            self.train_loader_model = data_utils.DataLoader(dataset_train, batch_size = batch_size, shuffle = True)\n",
    "\n",
    "        # Setting up lr scheduler:\n",
    "        if self.scheduler_settings is not None:\n",
    "            if self.scheduler_settings[0] == \"LambdaLR\":\n",
    "                function_type = scheduler_settings[1]\n",
    "                decay_scale = scheduler_settings[2]\n",
    "                scheduler_continue_decay = scheduler_settings[3]\n",
    "                if function_type == \"exp\":\n",
    "                    lambda_pred = lambda epoch: (1 - 1 / float(num_iter / change_interval / decay_scale)) ** epoch\n",
    "                elif function_type == \"poly\":\n",
    "                    lambda_pred = lambda epoch: 1 / (1 + 0.01 * epoch * change_interval * decay_scale)\n",
    "                else:\n",
    "                    raise\n",
    "                if scheduler_continue_decay:\n",
    "                    if not hasattr(self, \"scheduler\"):\n",
    "                        self.scheduler = LambdaLR(self.optimizer, lr_lambda = lambda_pred)\n",
    "                else:\n",
    "                    self.scheduler = LambdaLR(self.optimizer, lr_lambda = lambda_pred)\n",
    "\n",
    "                if domain_fit_setting is not None:\n",
    "                    if scheduler_continue_decay:\n",
    "                        if not hasattr(self, \"scheduler_domain\"):\n",
    "                            self.scheduler_domain = LambdaLR(self.optimizer_domain, lr_lambda = lambda_pred) \n",
    "                    else:\n",
    "                        self.scheduler_domain = LambdaLR(self.optimizer_domain, lr_lambda = lambda_pred)\n",
    "            elif self.scheduler_settings[0] == \"ReduceLROnPlateau\":\n",
    "                scheduler_patience = self.scheduler_settings[1]\n",
    "                scheduler_factor = self.scheduler_settings[2]\n",
    "                self.scheduler = ReduceLROnPlateau(self.optimizer, factor = scheduler_factor, patience = scheduler_patience, verbose = True)\n",
    "                if domain_fit_setting is not None:\n",
    "                    self.scheduler_domain = ReduceLROnPlateau(self.optimizer_domain, factor = scheduler_factor, patience = scheduler_patience, verbose = True)\n",
    "            else:\n",
    "                raise\n",
    "\n",
    "        # Setting up gradient noise:\n",
    "        if self.gradient_noise is not None:\n",
    "            self.scale_gen = Gradient_Noise_Scale_Gen(gamma = self.gradient_noise[\"gamma\"],\n",
    "                                                      eta = self.gradient_noise[\"eta\"],\n",
    "                                                      gradient_noise_interval_batch = self.gradient_noise[\"gradient_noise_interval_batch\"],\n",
    "                                                      batch_size = len(y_train),\n",
    "                                                     )\n",
    "            gradient_noise_scale = self.scale_gen.generate_scale(epochs = num_iter, num_examples = len(y_train), verbose = True)\n",
    "        else:\n",
    "            self.scale_gen = None\n",
    "\n",
    "        # Setting up loss_order_decay:\n",
    "        if loss_order_decay is not None:\n",
    "            self.loss_decay_scheduler = Loss_Decay_Scheduler(self.loss_types, lambda_loss_decay = loss_order_decay)\n",
    "        else:\n",
    "            self.loss_decay_scheduler = None\n",
    "\n",
    "        figsize = (10, 8)\n",
    "        self.model_dict_last = {}\n",
    "        self.model_dict_second_last = {}\n",
    "        self.data_record = {}\n",
    "        if patience is not None:\n",
    "            self.early_stopping = Early_Stopping(patience = patience, epsilon = 1e-10)\n",
    "        if domain_fit_setting is not None:\n",
    "            if patience is not None:\n",
    "                self.early_stopping_domain = Early_Stopping(patience = patience, epsilon = 1e-10)\n",
    "            to_stop_domain = False\n",
    "        to_stop = False\n",
    "\n",
    "        def show(k):\n",
    "            all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "            if prefix is not None:\n",
    "                print(prefix)\n",
    "            print(\"iter {0}  lr = {1:.9f}\".format(k, self.optimizer.param_groups[0][\"lr\"]))\n",
    "            if domain_fit_setting is not None:\n",
    "                print(\"lr_domain:\\t{0:.9f}\\nloss_domain:\\t{1:.9f}\\nreg_domain:\\t{2:.9f}\".format(self.optimizer_domain.param_groups[0][\"lr\"], all_losses_dict[\"loss_domain\"], all_losses_dict[\"reg_domain\"]))\n",
    "            if hasattr(self, \"autoencoder\"):\n",
    "                print(\"loss_recons:\\t{0:.9f}\".format(all_losses_dict[\"loss_recons\"]))\n",
    "            print(\"loss_best:\\t{0:.9f}\\nloss_model:\\t{1:.9f}\\nloss_with_domain:\\t{2:.9f}\".format(all_losses_dict[\"loss_best\"], all_losses_dict[\"loss\"], all_losses_dict[\"loss_with_domain\"]))\n",
    "            for loss_mode in all_losses_dict[\"loss_dict\"]:\n",
    "                print(\"loss_{0}:\\t{1:.9f}\".format(loss_mode, all_losses_dict[\"loss_dict\"][loss_mode]))\n",
    "            print(\"mse_with_domain:\\t{0:.9f}\\nmse_without_domain:\\t{1:.9f}\".format(all_losses_dict[\"mse_with_domain\"], all_losses_dict[\"mse_without_domain\"]))\n",
    "            for i in range(self.num_theories):\n",
    "                print(\"{0}_theory_{1}:\\t{2:.9f}\\tfraction best: {3:.5f}\\t domain: {4:.5f}\".format(self.loss_fun_cumu.loss_fun.core, i, all_losses_dict[\"loss_indi_theory\"][i], all_losses_dict[\"fraction_list_best\"][i], all_losses_dict[\"fraction_list_domain\"][i]))\n",
    "            print(\"reg:\\t{0:.9f}\".format(all_losses_dict[\"reg\"]))\n",
    "            print(\"reg_smooth_value: {0:.9f}\\tsmooth_norms: {1}\".format(all_losses_dict[\"reg_smooth_value\"], all_losses_dict[\"smooth_norms\"]))\n",
    "#             if reg_grad is not None:\n",
    "#                 print(\"reg_grad_value: {0:.9f}\\tgrad_norms: {1}\".format(all_losses_dict[\"reg_grad_value\"], all_losses_dict[\"grad_norms\"]))\n",
    "            if self.reg_multiplier is not None:\n",
    "                print(\"reg_multiplier_model iter {0}:\\t{1:.9f}\\nreg_multiplier_domain iter {2}\\t{3:.9f}\".format(self.reg_model_idx, self.reg_multiplier_model, self.reg_domain_idx, self.reg_multiplier_domain))\n",
    "            if loss_order_decay is not None:\n",
    "                print(\"loss_order_current:\\t{0}\".format(replaced_loss_order))\n",
    "            if self.scale_gen is not None:\n",
    "                print(\"current gradient noise scale: {0:.9f}\".format(current_gradient_noise_scale))\n",
    "            if \"big_domain_ids\" in kwargs and kwargs[\"big_domain_ids\"] is not None:\n",
    "                if \"metrics_big_domain\" in all_losses_dict:\n",
    "                    union = all_losses_dict[\"metrics_big_domain\"][\"union\"]\n",
    "                    predicted_big_domains = all_losses_dict[\"metrics_big_domain\"][\"predicted_big_domains\"]\n",
    "                    true_big_domains = all_losses_dict[\"metrics_big_domain\"][\"true_big_domains\"]\n",
    "                    intersection = all_losses_dict[\"metrics_big_domain\"][\"intersection\"]\n",
    "                    intersection_in_big = all_losses_dict[\"metrics_big_domain\"][\"intersection_in_big\"]\n",
    "                    if union is not None:\n",
    "                        precision = intersection / float(predicted_big_domains)\n",
    "                        recall = intersection / float(true_big_domains)\n",
    "                        F1 = 2 / (1 / precision  + 1 / recall)\n",
    "                        IoU = intersection / float(union)\n",
    "                        print(\"union: {0}\\tpredicted_big_domains: {1}\\ttrue_big_domains: {2}\\tintersection_in_big: {3}\\tintersection: {4}\".format(union, predicted_big_domains, true_big_domains, intersection_in_big, intersection))\n",
    "                        print(\"Precision: {0:.4f}\\tRecall: {1:.4f}\\tF1: {2:.4f}\\tIoU: {3:.4f}\".format(precision, recall, F1, IoU))\n",
    "                    print(\"loss_big_domain: {0:.9f}\".format(all_losses_dict[\"metrics_big_domain\"][\"loss_big_domain\"]))\n",
    "                    print(\"loss_with_domain_big_domain: {0:.9f}\".format(all_losses_dict[\"metrics_big_domain\"][\"loss_with_domain_big_domain\"]))\n",
    "                    print(\"mse_with_domain_big_domain: {0:.9f}\".format(all_losses_dict[\"metrics_big_domain\"][\"mse_with_domain_big_domain\"]))\n",
    "            print()\n",
    "            try:\n",
    "                sys.stdout.flush()\n",
    "            except:\n",
    "                pass\n",
    "            # Plotting:\n",
    "            if isplot or filename is not None:\n",
    "                self.plot(X_test, y_test, forward_steps = forward_steps, view_init = view_init, figsize = figsize, is_show = isplot, filename = filename + \"_{0}\".format(k) if filename is not None else None,\n",
    "                          true_domain = kwargs[\"true_domain_test\"] if \"true_domain_test\" in kwargs else None, num_output_dims = kwargs[\"num_output_dims\"],\n",
    "                          show_3D_plot = kwargs[\"show_3D_plot\"] if \"show_3D_plot\" in kwargs else False, \n",
    "                          show_vs = kwargs[\"show_vs\"] if \"show_vs\" in kwargs else False,\n",
    "                         )\n",
    "            print(\"=\" * 100 + \"\\n\\n\")\n",
    "\n",
    "        add_theory_count = 0\n",
    "        for k in range(num_iter):\n",
    "            # Configure reg_multiplier:\n",
    "            if self.reg_multiplier is not None:\n",
    "                if not to_stop:\n",
    "                    self.reg_model_idx += 1\n",
    "                    self.reg_multiplier_model = self.reg_multiplier[self.reg_model_idx] if self.reg_model_idx < len(self.reg_multiplier) else self.reg_multiplier[-1]\n",
    "                if domain_fit_setting is not None:\n",
    "                    self.reg_domain_idx += 1\n",
    "                    self.reg_multiplier_domain = self.reg_multiplier[self.reg_domain_idx] if self.reg_domain_idx < len(self.reg_multiplier) else self.reg_multiplier[-1]\n",
    "            else:\n",
    "                self.reg_multiplier_model = 1\n",
    "                if domain_fit_setting is not None:\n",
    "                    self.reg_multiplier_domain = 1\n",
    "\n",
    "            # Record and visualization:\n",
    "            if k % record_interval == 0:\n",
    "                all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "                record_data(self.data_record, [k, self.optimizer.param_groups[0][\"lr\"], all_losses_dict, None], [\"iter\", \"lr\", \"all_losses_dict\", \"event\"])\n",
    "                if record_mode >= 2:\n",
    "                    record_data(self.data_record, [self.pred_nets.model_dict, self.domain_net.model_dict], [\"pred_nets_model_dict\", \"domain_net_model_dict\"])\n",
    "            if k % inspect_interval == 0:\n",
    "                show(k)\n",
    "\n",
    "            # Pre-optimization setting:\n",
    "            self.model_dict_second_last = deepcopy(self.model_dict_last)\n",
    "            self.model_dict_last[\"pred_nets\"] = self.net_dict[\"pred_nets\"].model_dict\n",
    "            if \"uncertainty_nets\" in self.net_dict:\n",
    "                self.model_dict_last[\"uncertainty_nets\"] = self.net_dict[\"uncertainty_nets\"].model_dict\n",
    "            \n",
    "            # Update domain target and lr every change_interval:\n",
    "            if k % change_interval == 0:\n",
    "                if scheduler_settings is not None:\n",
    "                    if scheduler_settings[0] == \"ReduceLROnPlateau\":\n",
    "                        loss_test = self.get_losses(X_test, y_test, mode = [\"loss\"], forward_steps = forward_steps)[\"loss\"]\n",
    "                        self.scheduler.step(loss_test)\n",
    "                    else:\n",
    "                        self.scheduler.step()\n",
    "                if domain_fit_setting is not None:\n",
    "                    if scheduler_settings is not None:\n",
    "                        if scheduler_settings[0] == \"ReduceLROnPlateau\":\n",
    "                            if self.domain_net_on:\n",
    "                                loss_domain_test = self.get_losses(X_test, y_test, mode = [\"loss_domain\"], forward_steps = forward_steps)[\"loss_domain\"]\n",
    "                                self.scheduler_domain.step(loss_domain_test)\n",
    "                        else:\n",
    "                            self.scheduler_domain.step()\n",
    "                    self.best_theory_idx = get_best_model_idx(self.net_dict, X_train, y_train, loss_fun_cumu = self.loss_fun_cumu, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "                    self.best_theory_idx_test = get_best_model_idx(self.net_dict, X_test, y_test, loss_fun_cumu = self.loss_fun_cumu, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "                    dataset_domain_train = data_utils.TensorDataset(X_train.data, self.best_theory_idx.data)\n",
    "                    self.train_loader_domain = data_utils.DataLoader(dataset_domain_train, batch_size = batch_size, shuffle = True)\n",
    "\n",
    "            # Loss-order decay:\n",
    "            if loss_order_decay is not None:\n",
    "                replaced_loss_order = self.loss_decay_scheduler.step()\n",
    "            else:\n",
    "                replaced_loss_order = None\n",
    "\n",
    "            # Gradient noise:\n",
    "            if self.scale_gen is not None:\n",
    "                hook_handle_list = []\n",
    "                if k % self.scale_gen.gradient_noise_interval_batch == 0:\n",
    "                    for h in hook_handle_list:\n",
    "                        h.remove()\n",
    "                    hook_handle_list = []\n",
    "                    scale_idx = int(k / self.scale_gen.gradient_noise_interval_batch)\n",
    "                    if scale_idx >= len(gradient_noise_scale):\n",
    "                        current_gradient_noise_scale = gradient_noise_scale[-1]\n",
    "                    else:\n",
    "                        current_gradient_noise_scale = gradient_noise_scale[scale_idx]\n",
    "                    for parameter in self.pred_nets.parameters():\n",
    "                        if parameter.requires_grad:\n",
    "                            h = parameter.register_hook(lambda grad: grad + Variable(torch.normal(means = torch.zeros(grad.size()),\n",
    "                                                                                     std = current_gradient_noise_scale * torch.ones(grad.size()))))\n",
    "                            hook_handle_list.append(h)\n",
    "\n",
    "            # Calculate loss and gradient:\n",
    "            if batch_size is None:\n",
    "                train_loader_model = [[X_train.data, y_train.data]]\n",
    "                if domain_fit_setting is not None:\n",
    "                    train_loader_domain = [[X_train.data, self.best_theory_idx.data]]\n",
    "            else:\n",
    "                train_loader_model = self.train_loader_model\n",
    "                if domain_fit_setting is not None:\n",
    "                    train_loader_domain = self.train_loader_domain\n",
    "\n",
    "            # Training:\n",
    "            for batch_idx, (X_batch, y_batch) in enumerate(train_loader_model):\n",
    "                # Trainging model:\n",
    "                X_batch = Variable(X_batch, requires_grad = False)\n",
    "                y_batch = Variable(y_batch, requires_grad = False)\n",
    "                if not to_stop:\n",
    "                    if self.optim_type[0] == \"LBFGS\":\n",
    "                        def closure():\n",
    "                            self.optimizer.zero_grad()\n",
    "                            loss, _ = get_loss(net_dict = self.net_dict, X = X_batch, y = y_batch, loss_types = self.loss_types,\n",
    "                                               forward_steps = forward_steps,\n",
    "                                               domain_net = self.domain_net if self.domain_net_on else None, \n",
    "                                               domain_pred_mode = domain_pred_mode,\n",
    "                                               loss_fun_dict = self.loss_fun_dict,\n",
    "                                               replaced_loss_order = replaced_loss_order,\n",
    "                                               is_Lagrangian = self.is_Lagrangian,\n",
    "                                              )\n",
    "                            if hasattr(self, \"autoencoder\"):\n",
    "                                scale_autoencoder = self.optim_autoencoder_type[2]\n",
    "                                loss_recons = self.autoencoder.get_loss(X_train, X_train, nn.MSELoss()) * scale_autoencoder\n",
    "                                loss = loss + loss_recons\n",
    "                            reg, _ = get_reg(net_dict = self.net_dict, reg_dict = self.reg_dict, mode = self.reg_mode, is_cuda = self.is_cuda)\n",
    "                            if reg_smooth is not None:\n",
    "                                input_noise_scale = reg_smooth[0]\n",
    "                                num_samples = reg_smooth[4]\n",
    "                                diff_list = []\n",
    "                                for _ in range(num_samples):\n",
    "                                    input_perturb = Variable(torch.randn(*X_batch.size()) * input_noise_scale)\n",
    "                                    if self.is_cuda:\n",
    "                                        input_perturb = input_perturb.cuda()\n",
    "                                    diff = get_preds_valid(self.net_dict, X_batch + input_perturb, forward_steps = 1, is_Lagrangian = self.is_Lagrangian)[0] - \\\n",
    "                                           get_preds_valid(self.net_dict, X_batch, forward_steps = 1, is_Lagrangian = self.is_Lagrangian)[0]\n",
    "                                    diff_list.append(diff)\n",
    "                                diff_list = torch.stack(diff_list, 2)\n",
    "                                smooth_norms = get_group_norm(diff, reg_smooth[1], reg_smooth[2])\n",
    "                                reg_smooth_value = smooth_norms.mean() * reg_smooth[3]\n",
    "                                reg = reg + reg_smooth_value\n",
    "#                             if reg_grad is not None:\n",
    "#                                 X_batch.requires_grad = True\n",
    "#                                 loss_indi = self.loss_fun_cumu(self.pred_nets(X_batch), y_batch, cumu_mode = \"original\", is_mean = False).mean(0)\n",
    "#                                 grad_norms = torch.cat([get_group_norm(grad(loss_indi[i], X_batch, create_graph = True)[0], reg_grad[0], reg_grad[1]) for i in range(self.num_theories)])\n",
    "#                                 X_batch.requires_grad = False\n",
    "#                                 reg_grad_value = grad_norms.mean() * reg_grad[2]\n",
    "#                                 reg = reg + reg_grad_value\n",
    "                            loss = loss + reg * self.reg_multiplier_model\n",
    "                            loss.backward()\n",
    "                            if self.grad_clipping is not None:\n",
    "                                total_norm = torch.nn.utils.clip_grad_norm(self.pred_nets.parameters(), self.grad_clipping)\n",
    "                            if np.isnan(to_np_array(loss)):\n",
    "                                self.data_record[\"is_nan\"] = True\n",
    "                                raise Exception(\"NaN encountered!\")\n",
    "                            return loss\n",
    "                        self.optimizer.step(closure)\n",
    "                        if hasattr(self, \"autoencoder\"):\n",
    "                            self.optimizer_autoencoder.step(closure)\n",
    "                    else:\n",
    "                        self.optimizer.zero_grad()\n",
    "                        loss, _ = get_loss(net_dict = self.net_dict, X = X_batch, y = y_batch, loss_types = self.loss_types,\n",
    "                                           forward_steps = forward_steps,\n",
    "                                           domain_net = self.domain_net if self.domain_net_on else None,\n",
    "                                           domain_pred_mode = domain_pred_mode,\n",
    "                                           loss_fun_dict = self.loss_fun_dict,\n",
    "                                           replaced_loss_order = replaced_loss_order,\n",
    "                                           is_Lagrangian = self.is_Lagrangian,\n",
    "                                          )\n",
    "                        if hasattr(self, \"autoencoder\"):\n",
    "                            scale_autoencoder = self.optim_autoencoder_type[2]\n",
    "                            loss_recons = self.autoencoder.get_loss(X_train, X_train, nn.MSELoss()) * scale_autoencoder\n",
    "                            loss = loss + loss_recons\n",
    "                        reg, _ = get_reg(net_dict = self.net_dict, reg_dict = self.reg_dict, mode = self.reg_mode, is_cuda = self.is_cuda)\n",
    "                        if reg_smooth is not None:\n",
    "                            input_noise_scale = reg_smooth[0]\n",
    "                            num_samples = reg_smooth[4]\n",
    "                            diff_list = []\n",
    "                            for _ in range(num_samples):\n",
    "                                input_perturb = Variable(torch.randn(*X_batch.size()) * input_noise_scale)\n",
    "                                if self.is_cuda:\n",
    "                                    input_perturb = input_perturb.cuda()\n",
    "                                diff = get_preds_valid(self.net_dict, X_batch + input_perturb, forward_steps = 1, is_Lagrangian = self.is_Lagrangian)[0] - \\\n",
    "                                       get_preds_valid(self.net_dict, X_batch, forward_steps = 1, is_Lagrangian = self.is_Lagrangian)[0]\n",
    "                                diff_list.append(diff)\n",
    "                            diff_list = torch.stack(diff_list, 2)\n",
    "                            smooth_norms = get_group_norm(diff, reg_smooth[1], reg_smooth[2])\n",
    "                            reg_smooth_value = smooth_norms.mean() * reg_smooth[3]\n",
    "                            reg = reg + reg_smooth_value\n",
    "#                         if reg_grad is not None:\n",
    "#                             X_batch.requires_grad = True\n",
    "#                             loss_indi = self.loss_fun_cumu(self.pred_nets(X_batch), y_batch, cumu_mode = \"original\", is_mean = False).mean(0)\n",
    "#                             grad_norms = torch.cat([get_group_norm(grad(loss_indi[i], X_batch, create_graph = True)[0], reg_grad[0], reg_grad[1]) for i in range(self.num_theories)])\n",
    "#                             X_batch.requires_grad = False\n",
    "#                             reg_grad_value = grad_norms.mean() * reg_grad[2]\n",
    "#                             reg = reg + reg_grad_value\n",
    "                        loss = loss + reg * self.reg_multiplier_model\n",
    "                        loss.backward()\n",
    "                        if self.grad_clipping is not None:\n",
    "                            total_norm = torch.nn.utils.clip_grad_norm(self.pred_nets.parameters(), self.grad_clipping)\n",
    "                        if np.isnan(to_np_array(loss)):\n",
    "                            if raise_nan:\n",
    "                                self.data_record[\"is_nan\"] = True\n",
    "                                raise Exception(\"NaN encountered!\")\n",
    "                            else:\n",
    "                                self.data_record[\"is_nan\"] = True\n",
    "                                print(\"NaN encountered!\")\n",
    "                                self.pred_nets.load_model_dict(self.model_dict_second_last[\"pred_nets\"])\n",
    "                                self.net_dict[\"pred_nets\"] = self.pred_nets\n",
    "                                return deepcopy(self.data_record)\n",
    "                        self.optimizer.step()\n",
    "                        if hasattr(self, \"autoencoder\"):\n",
    "                            self.optimizer_autoencoder.step()\n",
    "\n",
    "                # Training domain:\n",
    "                if domain_fit_setting is not None:\n",
    "                    X_batch_domain, best_idx_domain = list(self.train_loader_domain)[batch_idx]\n",
    "                    X_batch_domain = Variable(X_batch_domain, requires_grad = False)\n",
    "                    best_idx_domain = Variable(best_idx_domain, requires_grad = False)\n",
    "                    if self.is_cuda:\n",
    "                        X_batch_domain = X_batch_domain.cuda()\n",
    "                        best_idx_domain = best_idx_domain.cuda()\n",
    "                    if hasattr(self, \"autoencoder\"):\n",
    "                         X_batch_domain = self.autoencoder.encode(X_batch_domain)\n",
    "                    if domain_fit_setting[\"optim_domain_type\"][0] == \"LBFGS\":\n",
    "                        def closure_domain():\n",
    "                            self.optimizer_domain.zero_grad()\n",
    "                            loss_domain = nn.CrossEntropyLoss()(self.domain_net(X_batch_domain), best_idx_domain)\n",
    "                            reg_domain, _ = get_reg(net_dict = self.net_dict, reg_dict = domain_fit_setting[\"reg_domain_dict\"], mode = domain_fit_setting[\"reg_domain_mode\"], is_cuda = self.is_cuda)\n",
    "                            loss_domain = loss_domain + reg_domain * self.reg_multiplier_domain\n",
    "                            loss_domain.backward()\n",
    "                            return loss_domain\n",
    "                        self.optimizer_domain.step(closure_domain)\n",
    "                    else:\n",
    "                        self.optimizer_domain.zero_grad()\n",
    "                        loss_domain = nn.CrossEntropyLoss()(self.domain_net(X_batch_domain), best_idx_domain)\n",
    "                        reg_domain, _ = get_reg(net_dict = self.net_dict, reg_dict = domain_fit_setting[\"reg_domain_dict\"], mode = domain_fit_setting[\"reg_domain_mode\"], is_cuda = self.is_cuda)\n",
    "                        loss_domain = loss_domain + reg_domain * self.reg_multiplier_domain\n",
    "                        loss_domain.backward()\n",
    "                        self.optimizer_domain.step()\n",
    "\n",
    "            # Early stopping:\n",
    "            loss_dict = self.get_losses(X_test, y_test, mode = [\"loss\", \"mse_with_domain\", \"loss_domain\"], forward_steps = forward_steps)\n",
    "            if loss_dict[\"mse_with_domain\"] < loss_floor:\n",
    "                print(\"loss = {0} is below the floor level {1}, stop.\".format(loss_dict[\"mse_with_domain\"], loss_floor))\n",
    "                break\n",
    "            if not to_stop:\n",
    "                if patience is not None:\n",
    "                    to_stop = self.early_stopping.monitor(loss_dict[\"loss\"])\n",
    "                if to_stop:\n",
    "                    if add_theory_loss_threshold is not None and (add_theory_quota is None or (add_theory_quota is not None and add_theory_count < add_theory_quota)) \\\n",
    "                                                             and (add_theory_limit is None or (add_theory_limit is not None and self.num_theories < add_theory_limit)):\n",
    "                        is_add, add_theories_info = self.add_theories(X_train, y_train,\n",
    "                                                                      validation_data = (X_test, y_test),\n",
    "                                                                      criteria = add_theory_criteria,\n",
    "                                                                      loss_threshold = add_theory_loss_threshold,\n",
    "                                                                      **kwargs\n",
    "                                                                     )\n",
    "                        if is_add:\n",
    "                            print(\"At iteration {0}\".format(k))\n",
    "                            to_stop = False\n",
    "                            add_theory_count += 1\n",
    "                            self.early_stopping.reset()\n",
    "                            all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "                            record_data(self.data_record, [k, self.optimizer.param_groups[0][\"lr\"], all_losses_dict, (\"add_theories\", add_theories_info)], [\"iter\", \"lr\", \"all_losses_dict\", \"event\"])\n",
    "                            if record_mode >= 2:\n",
    "                                record_data(self.data_record, [self.pred_nets.model_dict, self.domain_net.model_dict], [\"pred_nets_model_dict\", \"domain_net_model_dict\"])\n",
    "                            show(k)\n",
    "                    else:\n",
    "                        is_add = False\n",
    "                    if not is_add:\n",
    "                        all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "                        record_data(self.data_record, [k, self.optimizer.param_groups[0][\"lr\"], all_losses_dict, \"Model training complete.\"], [\"iter\", \"lr\", \"all_losses_dict\", \"event\"])\n",
    "                        if record_mode >= 2:\n",
    "                            record_data(self.data_record, [self.pred_nets.model_dict, self.domain_net.model_dict], [\"pred_nets_model_dict\", \"domain_net_model_dict\"])\n",
    "                        print(\"model training complete with early stopping at iteration {0}, with loss = {1:.9f}. Continue domain training.\".format(k, loss_dict[\"loss\"]))\n",
    "            if domain_fit_setting is not None:\n",
    "                if patience is not None:\n",
    "                    to_stop_domain = self.early_stopping_domain.monitor(loss_dict[\"loss_domain\"])\n",
    "            if to_stop and (domain_fit_setting is None or (domain_fit_setting is not None and to_stop_domain)):\n",
    "                print(\"the loss does not decrease for {0} consecutive iterations, stop at iteration {1}. Latest loss: {2}\".format(patience, k, loss_dict[\"loss\"]))\n",
    "                break\n",
    "\n",
    "        all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "        record_data(self.data_record, [k + 1, self.optimizer.param_groups[0][\"lr\"], all_losses_dict, \"end\"], [\"iter\", \"lr\", \"all_losses_dict\", \"event\"])\n",
    "        if record_mode >= 2:\n",
    "            record_data(self.data_record, [self.pred_nets.model_dict, self.domain_net.model_dict], [\"pred_nets_model_dict\", \"domain_net_model_dict\"])\n",
    "        show(k + 1)\n",
    "        self.data_record[\"is_nan\"] = False\n",
    "        print(\"completed!\")\n",
    "        return deepcopy(self.data_record)\n",
    "    \n",
    "    \n",
    "    def fit_domain(\n",
    "        self,\n",
    "        X_train,\n",
    "        y_train,\n",
    "        validation_data = None,\n",
    "        optim_domain_type = (\"adam\", 1e-3),\n",
    "        reg_domain_dict = {},\n",
    "        reg_domain_mode = \"L1\",\n",
    "        forward_steps = 1,\n",
    "        domain_pred_mode = \"onehot\",\n",
    "        scheduler_settings = (\"ReduceLROnPlateau\", 30, 0.1),\n",
    "        epochs = None,\n",
    "        patience = None,\n",
    "        inspect_interval = None,\n",
    "        change_interval = 1,\n",
    "        record_interval = None,\n",
    "        isplot = True,\n",
    "        filename = None,\n",
    "        view_init = (10, 190),\n",
    "        loss_floor = 1e-12,\n",
    "        prefix = None,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Train only the domain classifier (self.domain_net) to predict, for each\n",
    "        example, the index of the best-fitting theory.\n",
    "\n",
    "        Targets come from get_best_model_idx on (X_train, y_train); the domain net is\n",
    "        trained with cross-entropy plus regularization, operating in latent space when\n",
    "        an autoencoder is attached. Training stops when mse_with_domain drops below\n",
    "        loss_floor, when early stopping triggers on loss_domain, or after the\n",
    "        iteration budget is exhausted.\n",
    "\n",
    "        Returns:\n",
    "            A deepcopy of self.data_record_domain with the recorded training history.\n",
    "        \"\"\"\n",
    "        self.domain_net_on = True\n",
    "        # Bug fix: fall back to the training data *before* unpacking, so that the\n",
    "        # default validation_data = None no longer raises a TypeError:\n",
    "        if validation_data is None:\n",
    "            validation_data = (X_train, y_train)\n",
    "        X_test, y_test = validation_data\n",
    "        self.optim_domain_type = optim_domain_type\n",
    "        self.reg_domain_dict = reg_domain_dict\n",
    "        self.reg_domain_mode = reg_domain_mode\n",
    "        batch_size = kwargs[\"batch_size\"] if \"batch_size\" in kwargs else None\n",
    "        record_mode = kwargs[\"record_mode\"] if \"record_mode\" in kwargs else 1\n",
    "\n",
    "        # (Re)build the domain optimizer; if one already exists, move its lr to the\n",
    "        # geometric mean of the current and the requested lr (soft restart):\n",
    "        if not hasattr(self, \"optimizer_domain\"):\n",
    "            self.optimizer_domain = get_optimizer(optim_type = self.optim_domain_type[0], lr = self.optim_domain_type[1], parameters = self.domain_net.parameters())\n",
    "        else:\n",
    "            new_lr = np.sqrt(self.optimizer_domain.param_groups[0][\"lr\"] * self.optim_domain_type[1])\n",
    "            for param_group in self.optimizer_domain.param_groups:\n",
    "                param_group[\"lr\"] = new_lr\n",
    "\n",
    "        # Default iteration budget and inspection cadence depend on full-batch vs.\n",
    "        # minibatch training and on the optimizer (LBFGS takes fewer, heavier steps):\n",
    "        if batch_size is None:\n",
    "            if record_interval is None:\n",
    "                record_interval = 50\n",
    "            if self.optim_domain_type[0] == \"LBFGS\":\n",
    "                num_iter_domain = 5000\n",
    "                inspect_interval_domain = 100\n",
    "            else:\n",
    "                num_iter_domain = 30000\n",
    "                inspect_interval_domain = 2000\n",
    "        else:\n",
    "            if record_interval is None:\n",
    "                record_interval = 1\n",
    "            if self.optim_domain_type[0] == \"LBFGS\":\n",
    "                num_iter_domain = 250\n",
    "                inspect_interval_domain = 10\n",
    "            else:\n",
    "                num_iter_domain = 1000\n",
    "                inspect_interval_domain = 20\n",
    "        if epochs is not None:\n",
    "            num_iter_domain = epochs\n",
    "        if inspect_interval is None:\n",
    "            inspect_interval = inspect_interval_domain\n",
    "        assert inspect_interval % record_interval == 0\n",
    "\n",
    "        figsize = (10, 8)\n",
    "\n",
    "        self.data_record_domain = {}\n",
    "        # Classification targets: per-example index of the best-fitting theory.\n",
    "        self.best_theory_idx = get_best_model_idx(self.net_dict, X_train, y_train, loss_fun_cumu = self.loss_fun_cumu, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "        self.best_theory_idx_test = get_best_model_idx(self.net_dict, X_test, y_test, loss_fun_cumu = self.loss_fun_cumu, forward_steps = forward_steps, is_Lagrangian = self.is_Lagrangian)\n",
    "        # The domain net sees latent coordinates when an autoencoder is attached:\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            X_train_lat = Variable(self.autoencoder.encode(X_train).data, requires_grad = False)\n",
    "            X_test_lat = Variable(self.autoencoder.encode(X_test).data, requires_grad = False)\n",
    "        else:\n",
    "            X_train_lat, X_test_lat = X_train, X_test\n",
    "        if batch_size is not None:\n",
    "            dataset_domain_train = data_utils.TensorDataset(X_train_lat.data, self.best_theory_idx.data)\n",
    "            self.train_loader_domain = data_utils.DataLoader(dataset_domain_train, batch_size = batch_size, shuffle = True)\n",
    "\n",
    "        # Setting up lr_scheduler:\n",
    "        if scheduler_settings is not None:\n",
    "            if scheduler_settings[0] == \"LambdaLR\":\n",
    "                function_type = scheduler_settings[1]\n",
    "                decay_scale = scheduler_settings[2]\n",
    "                scheduler_continue_decay = scheduler_settings[3]\n",
    "                if function_type == \"exp\":\n",
    "                    lambda_domain = lambda epoch: (1 - 1 / float(num_iter_domain / change_interval / decay_scale)) ** epoch\n",
    "                elif function_type == \"poly\":\n",
    "                    lambda_domain = lambda epoch: 1 / (1 + 0.01 * epoch * change_interval * decay_scale)\n",
    "                else:\n",
    "                    # Bug fix: a bare `raise` outside an except block only produces an\n",
    "                    # opaque RuntimeError; raise an informative exception instead:\n",
    "                    raise ValueError(\"function_type {0} not recognized!\".format(function_type))\n",
    "                if scheduler_continue_decay:\n",
    "                    # Reuse the existing scheduler so the decay continues across calls:\n",
    "                    if not hasattr(self, \"scheduler_domain\"):\n",
    "                        self.scheduler_domain = LambdaLR(self.optimizer_domain, lr_lambda = lambda_domain)\n",
    "                else:\n",
    "                    self.scheduler_domain = LambdaLR(self.optimizer_domain, lr_lambda = lambda_domain)\n",
    "            elif scheduler_settings[0] == \"ReduceLROnPlateau\":\n",
    "                scheduler_patience = scheduler_settings[1]\n",
    "                scheduler_factor = scheduler_settings[2]\n",
    "                self.scheduler_domain = ReduceLROnPlateau(self.optimizer_domain, factor = scheduler_factor, patience = scheduler_patience, verbose = True)\n",
    "            else:\n",
    "                # Bug fix: see the bare-raise note above.\n",
    "                raise ValueError(\"scheduler type {0} not recognized!\".format(scheduler_settings[0]))\n",
    "\n",
    "        self.early_stopping_domain = Early_Stopping(patience = patience, epsilon = 1e-10)\n",
    "        to_stop_domain = False\n",
    "\n",
    "        def show(k):\n",
    "            # Print (and optionally plot) a full diagnostic snapshot at iteration k.\n",
    "            all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "            if prefix is not None:\n",
    "                print(prefix)\n",
    "            print(\"domain_iter {0}\\tlr = {1:.9f}\\nloss_domain:\\t{2:.9f}\\nreg_domain:\\t{3:.9f}\".format(k, self.optimizer_domain.param_groups[0][\"lr\"], \n",
    "                                                                                                  all_losses_dict[\"loss_domain\"], all_losses_dict[\"reg_domain\"]))\n",
    "            print(\"loss_best:\\t{0:.9f}\\nloss_model:\\t{1:.9f}\\nloss_with_domain:\\t{2:.9f}\".format(all_losses_dict[\"loss_best\"], all_losses_dict[\"loss\"], all_losses_dict[\"loss_with_domain\"]))\n",
    "            for loss_mode in all_losses_dict[\"loss_dict\"]:\n",
    "                print(\"loss_{0}:\\t{1:.9f}\".format(loss_mode, all_losses_dict[\"loss_dict\"][loss_mode]))\n",
    "            for i in range(self.num_theories):\n",
    "                print(\"{0}_theory_{1}:\\t{2:.9f}\\tfraction best: {3:.5f} \\tfraction domain: {4:.5f}\".format(self.loss_fun_cumu.loss_fun.core, i, all_losses_dict[\"loss_indi_theory\"][i], all_losses_dict[\"fraction_list_best\"][i], all_losses_dict[\"fraction_list_domain\"][i]))\n",
    "            print(\"mse_with_domain:\\t{0:.9f}\\nmse_without_domain:\\t{1:.9f}\".format(all_losses_dict[\"mse_with_domain\"], all_losses_dict[\"mse_without_domain\"]))\n",
    "            if self.reg_multiplier is not None:\n",
    "                print(\"reg_multiplier_domain iter {0}\\t{1:.9f}\".format(self.reg_domain_idx, self.reg_multiplier_domain))\n",
    "            # Optional big-domain metrics (overlap of predicted vs. true big domains):\n",
    "            if \"big_domain_ids\" in kwargs and kwargs[\"big_domain_ids\"] is not None:\n",
    "                if \"metrics_big_domain\" in all_losses_dict and all_losses_dict[\"metrics_big_domain\"] is not None:\n",
    "                    union = all_losses_dict[\"metrics_big_domain\"][\"union\"]\n",
    "                    predicted_big_domains = all_losses_dict[\"metrics_big_domain\"][\"predicted_big_domains\"]\n",
    "                    true_big_domains = all_losses_dict[\"metrics_big_domain\"][\"true_big_domains\"]\n",
    "                    intersection = all_losses_dict[\"metrics_big_domain\"][\"intersection\"]\n",
    "                    intersection_in_big = all_losses_dict[\"metrics_big_domain\"][\"intersection_in_big\"]\n",
    "                    if union is not None:\n",
    "                        precision = intersection / float(predicted_big_domains)\n",
    "                        recall = intersection / float(true_big_domains)\n",
    "                        F1 = 2 / (1 / precision  + 1 / recall)\n",
    "                        IoU = intersection / float(union)\n",
    "                        # Bug fix: \"{4s}\" is an invalid format field (KeyError at runtime); use \"{4}\":\n",
    "                        print(\"union: {0}\\tpredicted_big_domains: {1}\\ttrue_big_domains: {2}\\tintersection_in_big: {3}\\tintersection: {4}\".format(union, predicted_big_domains, true_big_domains, intersection_in_big, intersection))\n",
    "                        print(\"Precision: {0:.4f}\\tRecall: {1:.4f}\\tF1: {2:.4f}\\tIoU: {3:.4f}\".format(precision, recall, F1, IoU))\n",
    "                    print(\"loss_big_domain: {0:.9f}\".format(all_losses_dict[\"metrics_big_domain\"][\"loss_big_domain\"]))\n",
    "                    print(\"loss_with_domain_big_domain: {0:.9f}\".format(all_losses_dict[\"metrics_big_domain\"][\"loss_with_domain_big_domain\"]))\n",
    "                    print(\"mse_with_domain_big_domain: {0:.9f}\".format(all_losses_dict[\"metrics_big_domain\"][\"mse_with_domain_big_domain\"]))\n",
    "            print()\n",
    "            try:\n",
    "                sys.stdout.flush()\n",
    "            except Exception:\n",
    "                # Bug fix: don't use a bare except (it also swallows KeyboardInterrupt);\n",
    "                # flushing stdout is best-effort, so any ordinary failure is ignored.\n",
    "                pass\n",
    "            if isplot or filename is not None:\n",
    "                self.plot(X_test, y_test, forward_steps = forward_steps, view_init = view_init, figsize = figsize, is_show = isplot, filename = filename + \"_{0}\".format(k) if filename is not None else None,\n",
    "                          true_domain = kwargs[\"true_domain_test\"] if \"true_domain_test\" in kwargs else None,\n",
    "                          show_3D_plot = kwargs[\"show_3D_plot\"] if \"show_3D_plot\" in kwargs else False, \n",
    "                          show_vs = kwargs[\"show_vs\"] if \"show_vs\" in kwargs else False,\n",
    "                         )\n",
    "            print(\"=\" * 100 + \"\\n\\n\")\n",
    "\n",
    "        for k in range(num_iter_domain):\n",
    "            # Annealing schedule for the regularization multiplier:\n",
    "            if self.reg_multiplier is not None:\n",
    "                self.reg_domain_idx += 1\n",
    "                self.reg_multiplier_domain = self.reg_multiplier[self.reg_domain_idx] if self.reg_domain_idx < len(self.reg_multiplier) else self.reg_multiplier[-1]\n",
    "            else:\n",
    "                self.reg_multiplier_domain = 1\n",
    "\n",
    "            # Record:\n",
    "            if k % record_interval == 0:\n",
    "                all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "                record_data(self.data_record_domain, [k, self.optimizer_domain.param_groups[0][\"lr\"], all_losses_dict, None], [\"iter\", \"lr\", \"all_losses_dict\", \"event\"])\n",
    "                if record_mode >= 2:\n",
    "                    record_data(self.data_record_domain, [self.pred_nets.model_dict, self.domain_net.model_dict], [\"pred_nets_model_dict\", \"domain_net_model_dict\"])\n",
    "            if k % inspect_interval == 0:\n",
    "                show(k)\n",
    "            if k % change_interval == 0:\n",
    "                if scheduler_settings is not None:\n",
    "                    if scheduler_settings[0] == \"ReduceLROnPlateau\":\n",
    "                        # ReduceLROnPlateau needs the monitored metric; others do not:\n",
    "                        loss_domain_test = self.get_losses(X_test, y_test, mode = [\"loss_domain\"], forward_steps = forward_steps)[\"loss_domain\"]\n",
    "                        self.scheduler_domain.step(loss_domain_test)\n",
    "                    else:\n",
    "                        self.scheduler_domain.step()\n",
    "\n",
    "            # Full-batch training uses a single pseudo-batch:\n",
    "            if batch_size is None:\n",
    "                train_loader_domain = [[X_train_lat.data, self.best_theory_idx.data]]\n",
    "            else:\n",
    "                train_loader_domain = self.train_loader_domain\n",
    "            for batch_idx, (X_domain_batch, best_idx_domain) in enumerate(train_loader_domain):\n",
    "                X_domain_batch = Variable(X_domain_batch, requires_grad = False)\n",
    "                best_idx_domain = Variable(best_idx_domain, requires_grad = False)\n",
    "                if self.is_cuda:\n",
    "                    X_domain_batch = X_domain_batch.cuda()\n",
    "                    best_idx_domain = best_idx_domain.cuda()\n",
    "\n",
    "                if self.optim_domain_type[0] == \"LBFGS\":\n",
    "                    def closure_domain():\n",
    "                        # LBFGS may re-evaluate the loss several times per step:\n",
    "                        self.optimizer_domain.zero_grad()\n",
    "                        loss_domain = nn.CrossEntropyLoss()(self.domain_net(X_domain_batch), best_idx_domain)\n",
    "                        reg_domain, _ = get_reg(net_dict = self.net_dict, reg_dict = self.reg_domain_dict, mode = self.reg_domain_mode, is_cuda = self.is_cuda)\n",
    "                        loss_domain = loss_domain + reg_domain * self.reg_multiplier_domain\n",
    "                        loss_domain.backward()\n",
    "                        return loss_domain\n",
    "                    self.optimizer_domain.step(closure_domain)\n",
    "                else:\n",
    "                    self.optimizer_domain.zero_grad()\n",
    "                    loss_domain = nn.CrossEntropyLoss()(self.domain_net(X_domain_batch), best_idx_domain)\n",
    "                    reg_domain, _ = get_reg(net_dict = self.net_dict, reg_dict = self.reg_domain_dict, mode = self.reg_domain_mode, is_cuda = self.is_cuda)\n",
    "                    loss_domain = loss_domain + reg_domain * self.reg_multiplier_domain\n",
    "                    loss_domain.backward()\n",
    "                    self.optimizer_domain.step()\n",
    "\n",
    "            # Stopping criteria: loss floor first, then early stopping on loss_domain:\n",
    "            loss_dict = self.get_losses(X_test, y_test, mode = [\"mse_with_domain\", \"loss_domain\"], forward_steps = forward_steps)\n",
    "            if loss_dict[\"mse_with_domain\"] < loss_floor:\n",
    "                print(\"loss = {0} is below the loss floor level {1}, stop.\".format(loss_dict[\"mse_with_domain\"], loss_floor))\n",
    "                break\n",
    "\n",
    "            loss_domain_test = loss_dict[\"loss_domain\"]\n",
    "            to_stop_domain = self.early_stopping_domain.monitor(loss_domain_test)\n",
    "            if to_stop_domain:\n",
    "                print(\"the loss_domain does not decrease for {0} consecutive iterations, stop at iteration {1}. loss_domain: {2}\".format(patience, k, loss_domain_test))\n",
    "                break\n",
    "        print(\"completed\")\n",
    "        # Record final state:\n",
    "        all_losses_dict = self.get_losses(X_test, y_test, forward_steps = forward_steps, **kwargs)\n",
    "        record_data(self.data_record_domain, [k + 1, self.optimizer_domain.param_groups[0][\"lr\"], all_losses_dict, \"end\"], [\"iter\", \"lr\", \"all_losses_dict\", \"event\"])\n",
    "        if record_mode >= 2:\n",
    "            record_data(self.data_record_domain, [self.pred_nets.model_dict, self.domain_net.model_dict], [\"pred_nets_model_dict\", \"domain_net_model_dict\"])\n",
    "        show(k + 1)\n",
    "        self.data_record_domain[\"is_nan\"] = False\n",
    "        return deepcopy(self.data_record_domain)\n",
    "\n",
    "\n",
    "    def plot(\n",
    "        self,\n",
    "        X,\n",
    "        y, \n",
    "        forward_steps = 1,\n",
    "        view_init = (10, 190),\n",
    "        figsize = (10, 8), \n",
    "        show_3D_plot = False,\n",
    "        show_vs = False,\n",
    "        show_loss_histogram = True,\n",
    "        is_show = True,\n",
    "        filename = None,\n",
    "        **kwargs\n",
    "        ):\n",
    "        \"\"\"Visualize predictions, domains and losses on the dataset (X, y).\n",
    "\n",
    "        Depending on the flags, this draws:\n",
    "          - 3D scatter plots of target vs. per-theory predictions (show_3D_plot)\n",
    "            when input_size > 1, or 1D prediction curves (with uncertainty bars\n",
    "            if an \"uncertainty_nets\" entry exists in self.net_dict) otherwise;\n",
    "          - a 2D domain plot when kwargs contains a non-None \"true_domain\";\n",
    "          - pred-vs-target scatter plots (show_vs);\n",
    "          - loss histograms in log-mse and DL modes (show_loss_histogram).\n",
    "\n",
    "        If is_show is False, matplotlib is switched to the non-interactive\n",
    "        'Agg' backend and figures are only saved; when filename is given it\n",
    "        is used as a prefix for the individual image files.\n",
    "        \"\"\"\n",
    "        if not is_show:\n",
    "            import matplotlib\n",
    "            matplotlib.use('Agg')\n",
    "        import matplotlib.pyplot as plt\n",
    "        if hasattr(self, \"autoencoder\"):\n",
    "            X_lat = self.autoencoder.encode(X)\n",
    "        else:\n",
    "            X_lat = X\n",
    "        # Per-theory predictions plus the one-hot mask of the domain net's\n",
    "        # assignment, and the one-hot of the lowest-loss theory per example:\n",
    "        preds, valid_onehot = get_preds_valid(self.net_dict, X, forward_steps = forward_steps, domain_net = self.domain_net, domain_pred_mode = \"onehot\", is_Lagrangian = self.is_Lagrangian)\n",
    "        best_theory_idx = get_best_model_idx(self.net_dict, X, y, loss_fun_cumu = self.loss_fun_cumu, forward_steps = forward_steps, mode = \"expanded\", is_Lagrangian = self.is_Lagrangian)\n",
    "        best_theory_onehot = to_one_hot(best_theory_idx, valid_onehot.size(1))\n",
    "        true_domain = kwargs[\"true_domain\"] if \"true_domain\" in kwargs else None\n",
    "        if true_domain is not None:\n",
    "            uniques = np.unique(to_np_array(true_domain))\n",
    "            num_uniques = int(max(np.max(uniques) + 1, len(uniques)))\n",
    "            true_domain_onehot = to_one_hot(true_domain, num_uniques)\n",
    "        else:\n",
    "            true_domain_onehot = None\n",
    "        if show_3D_plot:\n",
    "            from mpl_toolkits.mplot3d import Axes3D\n",
    "            loss_dict = self.get_losses(X, y, mode = \"all\")\n",
    "            if self.input_size > 1:\n",
    "                for i in range(y.size(1)):\n",
    "                    if is_show:\n",
    "                        print(\"target with axis {0}:\".format(i))\n",
    "                    if true_domain is not None:\n",
    "                        axis_lim = plot3D(X, y[:,i:i+1].repeat(1, num_uniques), true_domain_onehot, view_init = view_init, figsize = figsize, is_show = is_show, filename = filename + \"_target_ax_{0}.png\".format(i) if filename is not None else None) \n",
    "                    else:\n",
    "                        axis_lim = plot3D(X, y[:,i:i+1], view_init = view_init, figsize = figsize, is_show = is_show, filename = filename + \"_target_ax_{0}.png\".format(i) if filename is not None else None) \n",
    "                for i in range(preds.size(2)):\n",
    "                    if is_show:\n",
    "                        print(\"best_prediction with axis {0}:\".format(i))\n",
    "                    plot3D(X, preds[:,:,i], best_theory_onehot, view_init = view_init, axis_lim = axis_lim, \n",
    "                           axis_title = [\"loss_best = {0:.9f}\".format(loss_dict[\"loss_best\"]),\n",
    "                                          \"\\n\".join([\"{2}_theory_{0}: {1:.9f}\".format(j, loss_dict['loss_indi_theory'][j], self.loss_fun_cumu.loss_fun.core) for j in range(len(loss_dict['loss_indi_theory']))]),\n",
    "                                        ], \n",
    "                           figsize = figsize, is_show = is_show, filename = filename + \"_best-prediction_ax_{0}.png\".format(i) if filename is not None else None)\n",
    "                for i in range(preds.size(2)):\n",
    "                    if is_show:\n",
    "                        print(\"all theory prediction with axis {0}:\".format(i))\n",
    "                    plot3D(X, preds[:,:,i], valid_onehot, view_init = view_init, axis_lim = axis_lim, \n",
    "                           axis_title = [\"mse_with_domain = {0:.9f}\".format(loss_dict['mse_with_domain']), \"loss_total = {0:.9f}\".format(loss_dict[\"loss\"])], \n",
    "                           figsize = figsize, is_show = is_show, filename = filename + \"_all-prediction_ax_{0}.png\".format(i) if filename is not None else None)\n",
    "            else:\n",
    "                ylim = (np.floor(preds.data.min()) - 3, np.ceil(preds.data.max()) + 3)        \n",
    "                if \"uncertainty_nets\" in self.net_dict:\n",
    "                    pred_with_uncertainty, info_list = get_pred_with_uncertainty(preds, self.net_dict[\"uncertainty_nets\"], X)\n",
    "                    fig = plt.figure(figsize = (6, 5))\n",
    "                    sigma = info_list.sum(1) ** (-0.5)\n",
    "                    plt.errorbar(to_np_array(X), to_np_array(pred_with_uncertainty), yerr = to_np_array(sigma), fmt='ob', markersize= 1, alpha = 0.4, label = \"theory_whole\")\n",
    "                    plt.ylim(ylim)\n",
    "                    plt.plot(to_np_array(X), to_np_array(y), \".k\", markersize = 2, alpha = 0.9)\n",
    "                else:\n",
    "                    for j in range(self.num_theories):\n",
    "                        plt.plot(to_np_array(X), to_np_array(preds[:,j]), color = COLOR_LIST[j % len(COLOR_LIST)], marker = \".\", markersize = 1, alpha = 0.6, label = \"theory_{0}\".format(j))\n",
    "                    plt.plot(to_np_array(X), to_np_array(y), \".k\", markersize = 2, alpha = 0.9)\n",
    "                    plt.legend()\n",
    "                    plt.ylim(ylim)\n",
    "                plt.legend()\n",
    "                plt.show()\n",
    "\n",
    "                fig = plt.figure(figsize = (self.num_theories * 6, 5))\n",
    "                for j in range(self.num_theories):\n",
    "                    plt.subplot(1, self.num_theories, j + 1)\n",
    "                    # NOTE(review): info_list is only defined in the \"uncertainty_nets\"\n",
    "                    # branch above; this assumes \"uncertainty-based\" in self.loss_types\n",
    "                    # implies \"uncertainty_nets\" in self.net_dict -- confirm.\n",
    "                    if \"uncertainty-based\" in self.loss_types:\n",
    "                        plt.errorbar(to_np_array(X), to_np_array(preds[:, j]), yerr = to_np_array((info_list ** (-0.5))[:, j]), fmt='o{0}'.format(\n",
    "                                     COLOR_LIST[j % len(COLOR_LIST)]), markersize= 1, alpha = 0.2, label = \"theory_{0}\".format(j))\n",
    "                    else:\n",
    "                        plt.plot(to_np_array(X), to_np_array(preds[:,j]), color = COLOR_LIST[j % len(COLOR_LIST)], marker = \".\", markersize = 1, alpha = 0.6, label = \"theory_{0}\".format(j))\n",
    "                    plt.ylim(ylim)\n",
    "                    plt.plot(to_np_array(X), to_np_array(y), \".k\", markersize = 2, alpha = 0.9)\n",
    "                    plt.legend()\n",
    "                plt.show()\n",
    "\n",
    "        # Plotting the domain on a 2D plane:\n",
    "        if \"true_domain\" in kwargs and kwargs[\"true_domain\"] is not None and self.input_size % 2 == 0 and \\\n",
    "            (\"num_output_dims\" not in kwargs or (\"num_output_dims\" in kwargs and kwargs[\"num_output_dims\"] in [1, 2, 4])) and \\\n",
    "            self.is_Lagrangian is not True:\n",
    "            if \"num_output_dims\" in kwargs and kwargs[\"num_output_dims\"] == 4:\n",
    "                # With 4 output dims, only columns 0 and 2 are plotted -- presumably\n",
    "                # the two position coordinates; TODO confirm against the caller.\n",
    "                idx = Variable(torch.LongTensor(np.array([0,2])))\n",
    "            else:\n",
    "                idx = None\n",
    "            self.get_domain_plot(X, y, X_lat = X_lat, true_domain = kwargs[\"true_domain\"], \n",
    "                                 X_idx = idx, y_idx = idx, \n",
    "                                 is_plot_loss = False if len(y.shape) == 4 else True, \n",
    "                                 is_plot_indi_domain = False, \n",
    "                                 is_show = is_show, \n",
    "                                 filename = filename + \"_domain-plot.png\" if filename is not None else None,\n",
    "                                 is_Lagrangian = self.is_Lagrangian,\n",
    "                                )\n",
    "\n",
    "        # Plotting pred vs. target:\n",
    "        if show_vs:\n",
    "            for i in range(preds.size(2)):\n",
    "                _, (ax1, ax2) = plt.subplots(1, 2, sharey = True, figsize=(14,6))       \n",
    "                self.plot_pred_vs_y(preds[:,:,i], y[:,i:i+1], best_theory_onehot, title = \"best_prediction_ax_{0}\".format(i), ax = ax1, is_color = True, is_show = False, filename = filename + \"_pred-vs-target_ax_{0}.png\".format(i) if filename is not None else None, is_close = False)\n",
    "                self.plot_pred_vs_y(preds[:,:,i], y[:,i:i+1], valid_onehot, title = \"domain_prediction_ax_{0}\".format(i), ax = ax2, is_color = True, is_show = is_show, filename = filename + \"_pred-vs-target_ax_{0}.png\".format(i) if filename is not None else None)\n",
    "\n",
    "        if is_show and hasattr(self, \"autoencoder\"):\n",
    "            loss_indi_theory = []\n",
    "            for k in range(self.num_theories):\n",
    "                loss_indi_theory.append(to_np_array(self.loss_fun_cumu(preds[:, k:k+1], y, is_mean = False)))\n",
    "            loss_indi_theory = np.concatenate(loss_indi_theory, 1)\n",
    "            domains = to_np_array(self.domain_net(X_lat).max(1)[1])\n",
    "            X_recons = self.autoencoder(X)\n",
    "            print(\"reconstruct:\")\n",
    "            for i in np.random.randint(len(X), size = 2):\n",
    "                plot_matrices(torch.cat([X[i], X_recons[i]], 0), images_per_row = 5)\n",
    "            print(\"prediction:\")\n",
    "            for i in np.random.randint(len(X), size = 10):\n",
    "                print(\"losses: {0}\".format(to_string(loss_indi_theory[i], connect = \"\\t\", num_digits = 6)))\n",
    "                print(\"best_idx: {0}\\tdomain_idx: {1}\".format(to_np_array(best_theory_idx[i]), domains[i]))\n",
    "                plot_matrices(torch.cat([y[i], preds[i]], 0), images_per_row = 5)\n",
    "\n",
    "        if show_loss_histogram:\n",
    "            self.plot_loss_histogram(X, y, X_lat = X_lat, mode = \"log-mse\", forward_steps = forward_steps, is_show = is_show, filename = filename)\n",
    "            self.plot_loss_histogram(X, y, X_lat = X_lat, mode = \"DL\" if not (\"DL\" in self.loss_core and self.loss_core != \"DL\") else self.loss_core, forward_steps = forward_steps, is_show = is_show, filename = filename)\n",
    "\n",
    "        plt.clf()\n",
    "        plt.close()\n",
    "\n",
    "\n",
    "    def plot_pred_vs_y(self, preds, y, valid_onehot, title = None, ax = None, is_color = False, is_show = True, filename = None, is_close = True):\n",
    "        \"\"\"Scatter-plot predictions against targets, optionally colored per theory.\n",
    "\n",
    "        Only points where valid_onehot is True are drawn. With is_color, each\n",
    "        theory (column of valid_onehot) is plotted in its own color from\n",
    "        COLOR_LIST. Draws on the given ax, or on the current pyplot figure\n",
    "        when ax is None.\n",
    "        \"\"\"\n",
    "        if ax is not None and not is_show:\n",
    "            import matplotlib\n",
    "            matplotlib.use('Agg')\n",
    "        import matplotlib.pyplot as plt\n",
    "        valid_onehot = to_Boolean(valid_onehot)\n",
    "        pred_chosen = torch.masked_select(preds, valid_onehot)\n",
    "        # The pyplot module and an Axes share the same .plot signature, so the\n",
    "        # drawing target can be chosen once:\n",
    "        canvas = plt if ax is None else ax\n",
    "        if is_color:\n",
    "            # One scatter per theory, colored consistently with the other plots:\n",
    "            for j in range(valid_onehot.size(1)):\n",
    "                mask = valid_onehot[:, j]\n",
    "                canvas.plot(to_np_array(y[:, 0][mask]), to_np_array(preds[:, j][mask]), \".\", color = COLOR_LIST[j % len(COLOR_LIST)], markersize = 1.5, alpha = 0.6)\n",
    "        else:\n",
    "            canvas.plot(to_np_array(y), to_np_array(pred_chosen), \".\", markersize = 1)\n",
    "        if ax is None:\n",
    "            plt.xlabel(\"y\")\n",
    "            plt.ylabel(\"pred\")\n",
    "            if title is not None:\n",
    "                plt.title(title)\n",
    "        else:\n",
    "            ax.set_xlabel(\"y\")\n",
    "            ax.set_ylabel(\"pred\")\n",
    "            if title is not None:\n",
    "                ax.set_title(title)\n",
    "        if filename is not None:\n",
    "            plt.savefig(filename)\n",
    "        if is_show:\n",
    "            plt.show()\n",
    "        if is_close:\n",
    "            plt.clf()\n",
    "            plt.close()\n",
    "\n",
    "\n",
    "    def plot_loss_histogram(self, X, y, X_lat = None, mode = \"log-mse\", forward_steps = 1, is_show = True, filename = None, **kwargs):\n",
    "        \"\"\"Plot histograms of per-example losses, overall and per predicted domain.\n",
    "\n",
    "        Args:\n",
    "            X, y: input and target tensors.\n",
    "            X_lat: latent representation fed to the domain net. NOTE(review):\n",
    "                defaults to None but is always passed to self.domain_net below;\n",
    "                callers appear to always supply it -- confirm.\n",
    "            mode: \"log-mse\" for log10 of the per-example mse, or any mode\n",
    "                containing \"DL\" for description-length-based losses.\n",
    "            forward_steps: forward steps used when computing predictions.\n",
    "            is_show: if False, switch matplotlib to the \"Agg\" backend.\n",
    "            filename: if given, prefix for saving the figure.\n",
    "            **kwargs: optional \"range\" (histogram range tuple) and \"alpha\".\n",
    "\n",
    "        Raises:\n",
    "            Exception: if mode is not recognized.\n",
    "        \"\"\"\n",
    "        if not is_show:\n",
    "            import matplotlib\n",
    "            matplotlib.use('Agg')\n",
    "        import matplotlib.pyplot as plt\n",
    "        # Fix: the local variable was named \"range\", shadowing the builtin;\n",
    "        # renamed to hist_range (pure rename, behavior unchanged).\n",
    "        if mode == \"log-mse\":\n",
    "            loss_list = torch.log(get_loss(self.net_dict, X, y, loss_types = {\"pred-based_mean\": {\"amp\": 1.}}, forward_steps = forward_steps, domain_net = self.domain_net, \n",
    "                                           loss_fun_dict = {\"loss_fun_cumu\": Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\", balance_model_influence = False, loss_precision_floor = self.loss_precision_floor)},\n",
    "                                           is_Lagrangian = self.is_Lagrangian, is_mean = False)[0]) / np.log(10)\n",
    "            loss_list = to_np_array(loss_list)\n",
    "            hist_range = kwargs[\"range\"] if \"range\" in kwargs else (-10, 1)\n",
    "        elif \"DL\" in mode:\n",
    "            loss_list = get_loss(self.net_dict, X, y, loss_types = {\"pred-based_mean\": {\"amp\": 1.}}, forward_steps = forward_steps, domain_net = self.domain_net, \n",
    "                                 loss_fun_dict = {\"loss_fun_cumu\": Loss_Fun_Cumu(core = mode, cumu_mode = \"mean\", balance_model_influence = False, loss_precision_floor = self.loss_precision_floor)},\n",
    "                                 is_Lagrangian = self.is_Lagrangian, is_mean = False)[0]\n",
    "            loss_list = to_np_array(loss_list)\n",
    "            if \"range\" in kwargs:\n",
    "                hist_range = kwargs[\"range\"]\n",
    "            else:\n",
    "                # Round the maximum loss up to the nearest even integer:\n",
    "                range_max = int(np.ceil(loss_list.max() / 2) * 2)\n",
    "                hist_range = (0, range_max)\n",
    "        else:\n",
    "            raise Exception(\"loss mode {0} not recognized!\".format(mode))\n",
    "        # Left panel: histogram of all per-example losses.\n",
    "        plt.figure(figsize = (16, 6))\n",
    "        plt.subplot(1, 2, 1)\n",
    "        plt.hist(loss_list, range = hist_range, bins = 40)\n",
    "        if \"DL\" in mode:\n",
    "            plt.title(\"{0} histogram, loss_precision_floor = {1}\".format(mode, self.loss_precision_floor))\n",
    "        else:\n",
    "            plt.title(\"{} histogram\".format(mode))\n",
    "\n",
    "        # Right panel: one overlaid histogram per domain predicted by domain_net.\n",
    "        plt.subplot(1, 2, 2)\n",
    "        domain_pred = self.domain_net(X_lat).max(1)[1]\n",
    "        for idx in np.unique(to_np_array(domain_pred)):\n",
    "            is_in = to_np_array(domain_pred == int(idx)).astype(bool)\n",
    "            plt.hist(loss_list[is_in], range = hist_range, bins = 40, alpha = kwargs[\"alpha\"] if \"alpha\" in kwargs else 0.5, color = COLOR_LIST[idx % len(COLOR_LIST)], label = \"{0}\".format(idx))\n",
    "        plt.legend()\n",
    "        plt.title(\"{0} per theory\".format(mode))\n",
    "        if filename is not None:\n",
    "            plt.savefig(filename + \"_{0}_hist.png\".format(mode))\n",
    "        if is_show:\n",
    "            plt.show()\n",
    "        plt.clf()\n",
    "        plt.close()\n",
    "\n",
    "    \n",
    "    def get_domain_plot(self, X, y, X_lat, true_domain, X_idx = None, y_idx = None, is_plot_loss = True, forward_steps = 1, is_plot_indi_domain = False, is_show = True, filename = None, is_Lagrangian = False):\n",
    "        \"\"\"Plot true / best-theory / domain-net domain assignments side by side.\n",
    "\n",
    "        Produces a 2x2 figure: (1) true domains, (2) best-fitting theory per\n",
    "        example, (3) domain net's prediction. Each example's points are drawn\n",
    "        in the color of its assigned domain; when is_plot_loss is True, the\n",
    "        target point is additionally scattered with marker size proportional\n",
    "        to the square root of its DLs loss.\n",
    "\n",
    "        X_idx / y_idx optionally select a subset of input/target columns\n",
    "        before plotting (e.g. to pick two coordinates out of four dims).\n",
    "        \"\"\"\n",
    "        if not is_show:\n",
    "            import matplotlib\n",
    "            matplotlib.use('Agg')\n",
    "        import matplotlib.pylab as plt\n",
    "        if self.domain_net.is_cuda:\n",
    "            X = X.cuda()\n",
    "            y = y.cuda()\n",
    "        # Hard domain assignments: argmax over the domain net's outputs vs. the\n",
    "        # per-example lowest-loss theory index.\n",
    "        domain_net_pred = to_np_array(self.domain_net(X_lat).max(1)[1])\n",
    "        best_model_pred = to_np_array(get_best_model_idx(self.net_dict, X, y, loss_fun_cumu = self.loss_fun_cumu, is_Lagrangian = self.is_Lagrangian))\n",
    "        true_domain = to_np_array(true_domain.squeeze())\n",
    "        if is_plot_loss:\n",
    "            preds, _ = get_preds_valid(self.net_dict, X, forward_steps = forward_steps, domain_net = None, is_Lagrangian = self.is_Lagrangian)\n",
    "            \n",
    "#             loss_fun_cumu = Loss_Fun_Cumu(core = \"mse\", cumu_mode = \"mean\", balance_model_influence = False, loss_precision_floor = self.loss_precision_floor)\n",
    "#             log_loss_best = to_np_array(torch.log(loss_fun_cumu(preds, y, model_weights = None, cumu_mode = \"min\", is_mean = False)) / np.log(10))\n",
    "#             log_loss_domain = to_np_array(torch.log(get_loss(self.net_dict, X, y, loss_types = {\"pred-based_mean\": {\"amp\": 1.}}, forward_steps = forward_steps, domain_net = self.domain_net, loss_fun_dict = {\"loss_fun_cumu\": loss_fun_cumu}, is_mean = False)[0]) / np.log(10))\n",
    "            \n",
    "            # Per-example DLs losses: minimum over theories, and under the\n",
    "            # domain net's assignment, used below for the marker sizes.\n",
    "            loss_fun_cumu = Loss_Fun_Cumu(core = \"DLs\", cumu_mode = \"mean\", balance_model_influence = False, loss_precision_floor = self.loss_precision_floor)\n",
    "            log_loss_best = to_np_array(loss_fun_cumu(preds, y, model_weights = None, cumu_mode = \"min\", is_mean = False))\n",
    "            log_loss_domain = get_loss(self.net_dict, X, y, loss_types = {\"pred-based_mean\": {\"amp\": 1.}}, forward_steps = forward_steps, domain_net = self.domain_net, loss_fun_dict = {\"loss_fun_cumu\": loss_fun_cumu}, is_Lagrangian = self.is_Lagrangian, is_mean = False)[0]\n",
    "        else:\n",
    "            log_loss_best = None\n",
    "            log_loss_domain = None\n",
    "        \n",
    "        if X_idx is not None:\n",
    "            X_idx = to_Variable(X_idx).long()\n",
    "            X = torch.index_select(X, -1, X_idx)\n",
    "        if y_idx is not None:\n",
    "            y_idx = to_Variable(y_idx).long()\n",
    "            y = torch.index_select(y, -1, y_idx)\n",
    "\n",
    "        if is_plot_indi_domain:\n",
    "            plot_indi_domain(X_lat, domain = true_domain, is_show = is_show, filename = filename)\n",
    "\n",
    "        # Note: the default y = (y if is_plot_loss else None) below is evaluated\n",
    "        # once, at definition time, capturing the (possibly index-selected) y.\n",
    "        def plot_domains(\n",
    "            X,\n",
    "            domain,\n",
    "            y = y if is_plot_loss else None,\n",
    "            log_loss = None,\n",
    "            title = None,\n",
    "            is_legend = True,\n",
    "            ):\n",
    "            # Reshape to (n_examples, -1, 2): inputs are presumably sequences of\n",
    "            # 2D coordinates -- TODO confirm against the data pipeline.\n",
    "            X = to_np_array(X).reshape(X.shape[0], -1, 2)\n",
    "            if y is not None:\n",
    "                y = to_np_array(y).reshape(y.shape[0], 1, 2)\n",
    "                Xy = np.concatenate([X, y], 1)\n",
    "\n",
    "            for idx in np.unique(domain):\n",
    "                domain_idx = (domain == int(idx)).astype(bool)\n",
    "                if y is not None:\n",
    "                    X_domain = Xy[domain_idx]\n",
    "                    y_domain = y[domain_idx]\n",
    "                    if log_loss is not None:\n",
    "                        log_loss_domain = to_np_array(log_loss)[domain_idx]\n",
    "                else:\n",
    "                    X_domain = X[domain_idx]\n",
    "\n",
    "                # Draw each example's trajectory; label only the first so the\n",
    "                # legend gets one entry per domain.\n",
    "                for i in range(len(X_domain)):\n",
    "                    if i == 0:\n",
    "                        plt.plot(X_domain[i, :, 0], X_domain[i, :, 1], \".-\", color = COLOR_LIST[idx % len(COLOR_LIST)], alpha = 0.5, markersize = 1, linewidth = 1, label = str(idx))\n",
    "                    else:\n",
    "                        plt.plot(X_domain[i, :, 0], X_domain[i, :, 1], \".-\", color = COLOR_LIST[idx % len(COLOR_LIST)], alpha = 0.5, markersize = 1, linewidth = 1)\n",
    "                if y is not None and log_loss is not None:\n",
    "                    plt.scatter(y_domain[:, 0, 0], y_domain[:, 0, 1], s = 4 * np.sqrt(log_loss_domain), color = COLOR_LIST[idx % len(COLOR_LIST)])\n",
    "            if is_legend:\n",
    "                plt.legend(bbox_to_anchor = (1, 0.9, 0.15 ,0.1))\n",
    "            if title is not None:\n",
    "                plt.title(title)\n",
    "\n",
    "        plt.figure(figsize = (19,16))\n",
    "        plt.subplot(2, 2, 1)\n",
    "        plot_domains(X_lat, true_domain, title = \"True\", is_legend = False)\n",
    "\n",
    "        plt.subplot(2, 2, 2)\n",
    "        plot_domains(X_lat, best_model_pred, log_loss = log_loss_best, title = \"Best\")\n",
    "\n",
    "        plt.subplot(2, 2, 3)\n",
    "        plot_domains(X_lat, domain_net_pred, log_loss = log_loss_domain, title = \"Domain: precision-floor: {0}\".format(self.loss_precision_floor))\n",
    "\n",
    "        if filename is not None:\n",
    "            plt.savefig(filename)\n",
    "        if is_show:\n",
    "            plt.show()\n",
    "        plt.clf()\n",
    "        plt.close()\n",
    "\n",
    "\n",
    "    def set_net(self, net_name, net):\n",
    "        \"\"\"Register (or replace) a named sub-network on this instance.\n",
    "\n",
    "        The net is stored both in self.net_dict and as an attribute, and\n",
    "        num_theories is refreshed from the (possibly updated) pred_nets.\n",
    "        \"\"\"\n",
    "        self.net_dict[net_name] = net\n",
    "        setattr(self, net_name, net)\n",
    "        self.num_theories = self.pred_nets.num_models"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
