{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.utils.data as data_utils\n",
    "from torch.autograd import grad, Variable\n",
    "import sys, os\n",
    "sys.path.append(os.path.join(os.path.dirname(\"__file__\"), '..', '..'))\n",
    "from AI_physicist.pytorch_net.net import MLP\n",
    "from AI_physicist.pytorch_net.util import get_criterion, MAELoss, to_np_array, to_Variable, Loss_Fun\n",
    "from AI_physicist.settings.global_param import PrecisionFloorLoss, Dt\n",
    "from AI_physicist.theory_learning.util_theory import logplus"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Important loss functions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Loss_Fun_Cumu(nn.Module):\n",
    "    \"\"\"Implement the generalized-mean loss for differentiable divide-and-conquer.\n",
    "\n",
    "    `pred` carries one column per model; the per-model losses are accumulated\n",
    "    along the model dimension (dim 1) according to `cumu_mode`.\n",
    "    \"\"\"\n",
    "    def __init__(\n",
    "        self,\n",
    "        core,\n",
    "        cumu_mode,\n",
    "        neglect_threshold = None,\n",
    "        epsilon = 1e-10,\n",
    "        balance_model_influence = False,\n",
    "        balance_model_influence_epsilon = 0.03, \n",
    "        loss_precision_floor = None,\n",
    "        ):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            core: core loss name forwarded to Loss_Fun (e.g. \"mse\").\n",
    "            cumu_mode: accumulation mode: \"original\", \"mean\", \"min\", \"max\",\n",
    "                \"harmonic\", \"geometric\", a tuple (\"generalized-mean\", order)\n",
    "                or (\"DL-generalized-mean\", order).\n",
    "            neglect_threshold: if given, models whose loss exceeds it are masked\n",
    "                out per example (unless no model qualifies for that example).\n",
    "            epsilon: small constant guarding against division by zero and log(0).\n",
    "            balance_model_influence: if True, reweight samples so that each model\n",
    "                receives the same total weight over the samples.\n",
    "            balance_model_influence_epsilon: smoothing constant for the balancing.\n",
    "            loss_precision_floor: precision floor for the DL loss; falls back to\n",
    "                the global PrecisionFloorLoss when None.\n",
    "        \"\"\"\n",
    "        super(Loss_Fun_Cumu, self).__init__()\n",
    "        self.name = \"Loss_Fun_Cumu\"\n",
    "        self.loss_fun = Loss_Fun(core = core, epsilon = epsilon, loss_precision_floor = loss_precision_floor)\n",
    "        self.cumu_mode = cumu_mode\n",
    "        self.neglect_threshold = neglect_threshold\n",
    "        self.epsilon = epsilon\n",
    "        self.balance_model_influence = balance_model_influence\n",
    "        self.balance_model_influence_epsilon = balance_model_influence_epsilon\n",
    "        self.loss_precision_floor = loss_precision_floor\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        pred,\n",
    "        target,\n",
    "        model_weights = None,\n",
    "        sample_weights = None,\n",
    "        neglect_threshold_on = True,\n",
    "        is_mean = True,\n",
    "        cumu_mode = None,\n",
    "        balance_model_influence = None,\n",
    "        ):\n",
    "        \"\"\"Accumulate the per-model losses of `pred` (shape (N, num_models)) vs. `target`.\n",
    "\n",
    "        Returns the scalar mean if `is_mean` is True, else a per-example loss tensor.\n",
    "        \"\"\"\n",
    "        num = pred.size(1)\n",
    "        # Single model: nothing to accumulate.\n",
    "        if num == 1:\n",
    "            return self.loss_fun(pred, target, sample_weights = sample_weights, is_mean = is_mean)\n",
    "\n",
    "        if model_weights is not None:\n",
    "            # Normalize so each example's weights over the models sum to one:\n",
    "            model_weights = model_weights.float() / model_weights.float().sum(1, keepdim = True)\n",
    "        # Per-example loss of each model, shape (N, num_models):\n",
    "        loss_list = torch.cat([self.loss_fun(pred[:, i:i+1], target, is_mean = False) for i in range(num)], 1)\n",
    "\n",
    "        # Modify model_weights according to neglect_threshold if stipulated:\n",
    "        if neglect_threshold_on:\n",
    "            neglect_threshold = self.neglect_threshold\n",
    "        else:\n",
    "            neglect_threshold = None\n",
    "        if neglect_threshold is not None:\n",
    "            valid_candidate = (loss_list <= neglect_threshold).long()\n",
    "            # Examples where no model beats the threshold keep all models valid:\n",
    "            renew_id = valid_candidate.sum(1) < 1\n",
    "            valid_weights = valid_candidate.clone().masked_fill_(renew_id.unsqueeze(1), 1).float()\n",
    "            if model_weights is None:\n",
    "                model_weights = valid_weights\n",
    "            else:\n",
    "                model_weights = model_weights * valid_weights + self.epsilon\n",
    "\n",
    "        # Setting cumu_mode; generalized-mean of order 1, 0 or -1 reduces to the\n",
    "        # arithmetic, geometric and harmonic mean respectively:\n",
    "        if cumu_mode is None:\n",
    "            cumu_mode = self.cumu_mode\n",
    "        if cumu_mode[0] == \"generalized-mean\" and cumu_mode[1] == 1:\n",
    "            cumu_mode = \"mean\"\n",
    "        elif cumu_mode[0] == \"generalized-mean\" and cumu_mode[1] == 0:\n",
    "            cumu_mode = \"geometric\"\n",
    "        elif cumu_mode[0] == \"generalized-mean\" and cumu_mode[1] == -1:\n",
    "            cumu_mode = \"harmonic\"\n",
    "\n",
    "        # Obtain loss:\n",
    "        if cumu_mode == \"original\":\n",
    "            loss = loss_list\n",
    "            if model_weights is not None:\n",
    "                loss = loss * model_weights\n",
    "        elif cumu_mode == \"mean\":\n",
    "            if model_weights is None:\n",
    "                loss = loss_list.mean(1)\n",
    "            else:\n",
    "                loss = (loss_list * model_weights).sum(1)\n",
    "        elif cumu_mode == \"min\":\n",
    "            loss = loss_list.min(1)[0]\n",
    "        elif cumu_mode == \"max\":\n",
    "            loss = loss_list.max(1)[0]\n",
    "        elif cumu_mode == \"harmonic\":\n",
    "            if model_weights is None:\n",
    "                loss = num / (1 / (loss_list + self.epsilon)).sum(1)\n",
    "            else:\n",
    "                loss = 1 / (model_weights / (loss_list + self.epsilon)).sum(1)\n",
    "        elif cumu_mode == \"geometric\":\n",
    "            if model_weights is None:\n",
    "                loss = (loss_list + self.epsilon).prod(1) ** (1 / float(num))\n",
    "            else:\n",
    "                loss = torch.exp((model_weights * torch.log(loss_list + self.epsilon)).sum(1))\n",
    "        elif cumu_mode[0] == \"generalized-mean\":\n",
    "            order = cumu_mode[1]\n",
    "            if model_weights is None:\n",
    "                loss = (((loss_list + self.epsilon) ** order).clamp(max = 1e30).mean(1)) ** (1 / float(order))\n",
    "            else:\n",
    "                loss = ((model_weights * (loss_list + self.epsilon) ** order).clamp(max = 1e30).sum(1)) ** (1 / float(order))\n",
    "        elif cumu_mode[0] == \"DL-generalized-mean\":\n",
    "            if self.loss_precision_floor is None:\n",
    "                loss_precision_floor = PrecisionFloorLoss\n",
    "            else:\n",
    "                loss_precision_floor = self.loss_precision_floor\n",
    "            order = cumu_mode[1]\n",
    "            if model_weights is None:\n",
    "                loss = logplus((((loss_list + self.epsilon) ** order).clamp(max = 1e30).mean(1)) ** (1 / float(order)) / loss_precision_floor)\n",
    "            else:\n",
    "                loss = logplus(((model_weights * (loss_list + self.epsilon) ** order).clamp(max = 1e30).sum(1)) ** (1 / float(order)) / loss_precision_floor)\n",
    "        else:\n",
    "            raise Exception(\"cumu_mode {0} not recognized!\".format(cumu_mode))\n",
    "\n",
    "        # Balance model influence so that each model has the same total weights on the samples:\n",
    "        if balance_model_influence is None:\n",
    "            balance_model_influence = self.balance_model_influence\n",
    "        if model_weights is not None and balance_model_influence is True: \n",
    "            num_examples = len(model_weights)\n",
    "            if sample_weights is None:\n",
    "                sample_weights = (model_weights.float() / (self.balance_model_influence_epsilon * num_examples + model_weights.float().sum(0, keepdim = True))).sum(1)\n",
    "            else:\n",
    "                # Fix: the original referenced an undefined name `product` here\n",
    "                # (NameError); build the sample-weighted model weights explicitly\n",
    "                # and reuse them, mirroring the sample_weights-is-None branch:\n",
    "                product = sample_weights.float().view(-1, 1) * model_weights.float()\n",
    "                sample_weights = (product / (self.balance_model_influence_epsilon * num_examples + product.sum(0, keepdim = True))).sum(1)\n",
    "            sample_weights = sample_weights / sample_weights.sum() * num_examples\n",
    "\n",
    "        # Multiply by sample weights:\n",
    "        if sample_weights is not None:\n",
    "            assert tuple(sample_weights.size()) == tuple(loss.size()), \"sample_weights must have the same size as the accumulated loss!\"\n",
    "            loss = loss * sample_weights\n",
    "\n",
    "        # Calculate the mean:\n",
    "        if is_mean:\n",
    "            loss = loss.mean()\n",
    "        return loss\n",
    "    \n",
    "    \n",
    "def get_Lagrangian_loss_indi(model, X, dt = Dt, force_kinetic_term = False, normalize = True):\n",
    "    \"\"\"Obtain individual loss for Euler-Lagrangian Equation\n",
    "    qx1, qdotx1, qy1, qdoty1, qx2, qdotx2, qy2, qdoty2 are the 8 dimensions of the X.\n",
    "\n",
    "    Args:\n",
    "        model: callable mapping an (N, 4) state (q_x, qdot_x, q_y, qdot_y) to a\n",
    "            Lagrangian value per example -- assumes output shape (N, 1); TODO confirm.\n",
    "        X: (N, 8) tensor holding two consecutive states of the system.\n",
    "        dt: time step separating the two states.\n",
    "        force_kinetic_term: if True, add the kinetic term (qdot_x**2 + qdot_y**2) / 2\n",
    "            to the model output for each state.\n",
    "        normalize: if True, divide the residual by the model value at the second\n",
    "            state (NOTE(review): may blow up when that value is near zero -- confirm).\n",
    "\n",
    "    Returns:\n",
    "        (N, 2) tensor of x/y residuals of the discretized Euler-Lagrange\n",
    "        equation; a zero residual means the equation is satisfied.\n",
    "    \"\"\"\n",
    "    assert X.shape[-1] == 8, \"The input dimension for the Lagragian must be 8!\"\n",
    "    # Take gradients w.r.t. a fresh leaf tensor detached from any upstream graph:\n",
    "    X = X.detach()\n",
    "    X.requires_grad = True\n",
    "    if force_kinetic_term:\n",
    "        pred1 = model(X[:,:4]) + (X[:,1:2] ** 2 + X[:,3:4] ** 2) / 2\n",
    "        pred2 = model(X[:,4:]) + (X[:,5:6] ** 2 + X[:,7:8] ** 2) / 2\n",
    "    else:\n",
    "        pred1 = model(X[:,:4])\n",
    "        pred2 = model(X[:,4:])\n",
    "    # d(L1 + L2)/dX with create_graph=True so the residual stays differentiable:\n",
    "    pred_X = grad((pred1 + pred2).sum(), X, create_graph = True)[0]\n",
    "    # Discretized Euler-Lagrange residual in x and y, averaging dL/dq over the two states:\n",
    "    eq_x = pred_X[:, 5:6] - pred_X[:, 1:2] - dt * (pred_X[:, 4:5] + pred_X[:, 0:1]) / 2\n",
    "    eq_y = pred_X[:, 7:8] - pred_X[:, 3:4] - dt * (pred_X[:, 6:7] + pred_X[:, 2:3]) / 2\n",
    "    eq = torch.cat([eq_x, eq_y], 1)\n",
    "    \n",
    "    if normalize:\n",
    "        eq = eq / model(X[:,4:])\n",
    "    return eq\n",
    "\n",
    "\n",
    "\n",
    "def get_Lagrangian_loss(model, X, dt = Dt, force_kinetic_term = True):\n",
    "    \"\"\"Compute Euler-Lagrange residuals for a single model, or for every member\n",
    "    of a Model_Ensemble (member residuals stacked along a new dim 1).\n",
    "    \"\"\"\n",
    "    # Plain models are delegated directly to the individual loss:\n",
    "    if model.__class__.__name__ != \"Model_Ensemble\":\n",
    "        return get_Lagrangian_loss_indi(model, X, dt = dt, force_kinetic_term = force_kinetic_term)\n",
    "    # Ensembles: recurse into each member model and stack the residuals:\n",
    "    member_eqs = [\n",
    "        get_Lagrangian_loss(getattr(model, \"model_{0}\".format(k)), X, dt = dt, force_kinetic_term = force_kinetic_term)\n",
    "        for k in range(model.num_models)\n",
    "    ]\n",
    "    return torch.stack(member_eqs, 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Models:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## The Statistics_Net and Generative_Net are variants of the architecture in \n",
    "## Wu, Tailin, et al. \"Meta-learning autoencoders for few-shot prediction.\" arXiv preprint arXiv:1807.09912 (2018).\n",
    "class Statistics_Net(nn.Module):\n",
    "    \"\"\"Permutation-invariant encoder of a set of examples into a statistics vector.\n",
    "\n",
    "    Each row of the input is encoded by an MLP, the encodings are pooled over\n",
    "    rows (mean or max), and a post-pooling MLP maps the pooled vector to the\n",
    "    output; an optional second head also emits a log-variance.\n",
    "\n",
    "    NOTE(review): load_model_dict below relies on a module-level helper that is\n",
    "    not imported in this notebook -- confirm it is in scope at call time.\n",
    "    \"\"\"\n",
    "    def __init__(self, input_size, pre_pooling_neurons, struct_param_pre, struct_param_post, struct_param_post_logvar = None, pooling = \"max\", settings = {\"activation\": \"leakyRelu\"}, layer_type = \"Simple_layer\", is_cuda = False):\n",
    "        # NOTE(review): mutable default argument `settings` is shared across\n",
    "        # instances; safe only as long as callers never mutate it.\n",
    "        super(Statistics_Net, self).__init__()\n",
    "        self.input_size = input_size\n",
    "        self.pre_pooling_neurons = pre_pooling_neurons\n",
    "        self.struct_param_pre = struct_param_pre\n",
    "        self.struct_param_post = struct_param_post\n",
    "        self.struct_param_post_logvar = struct_param_post_logvar\n",
    "        self.pooling = pooling\n",
    "        self.settings = settings\n",
    "        self.layer_type = layer_type\n",
    "        self.is_cuda = is_cuda\n",
    "\n",
    "        # Per-example encoder (pre-pooling) and post-pooling head(s):\n",
    "        self.encoding_statistics_Net = MLP(input_size = self.input_size, struct_param = self.struct_param_pre, settings = self.settings, is_cuda = is_cuda)\n",
    "        self.post_pooling_Net = MLP(input_size = self.pre_pooling_neurons, struct_param = self.struct_param_post, settings = self.settings, is_cuda = is_cuda)\n",
    "        if self.struct_param_post_logvar is not None:\n",
    "            self.post_pooling_logvar_Net = MLP(input_size = self.pre_pooling_neurons, struct_param = self.struct_param_post_logvar, settings = self.settings, is_cuda = is_cuda)\n",
    "        if self.is_cuda:\n",
    "            self.cuda()\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serializable snapshot of the module (keys consumed by load_model_dict).\"\"\"\n",
    "        model_dict = {\"type\": \"Statistics_Net\"}\n",
    "        model_dict[\"input_size\"] = self.input_size\n",
    "        model_dict[\"pre_pooling_neurons\"] = self.pre_pooling_neurons\n",
    "        model_dict[\"struct_param_pre\"] = self.struct_param_pre\n",
    "        model_dict[\"struct_param_post\"] = self.struct_param_post\n",
    "        model_dict[\"struct_param_post_logvar\"] = self.struct_param_post_logvar\n",
    "        model_dict[\"pooling\"] = self.pooling\n",
    "        model_dict[\"settings\"] = self.settings\n",
    "        model_dict[\"layer_type\"] = self.layer_type\n",
    "        model_dict[\"encoding_statistics_Net\"] = self.encoding_statistics_Net.model_dict\n",
    "        model_dict[\"post_pooling_Net\"] = self.post_pooling_Net.model_dict\n",
    "        if self.struct_param_post_logvar is not None:\n",
    "            model_dict[\"post_pooling_logvar_Net\"] = self.post_pooling_logvar_Net.model_dict\n",
    "        return model_dict\n",
    "    \n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Rebuild the network from model_dict and adopt its attributes.\"\"\"\n",
    "        new_net = load_model_dict(model_dict, is_cuda = self.is_cuda)\n",
    "        self.__dict__.update(new_net.__dict__)\n",
    "\n",
    "    def forward(self, input):\n",
    "        \"\"\"Encode each row of `input`, pool over rows, and map to the output;\n",
    "        returns (output, logvar) when a logvar head is configured.\n",
    "        \"\"\"\n",
    "        encoding = self.encoding_statistics_Net(input)\n",
    "        # Pool over the example dimension (dim 0) to get a set-level summary:\n",
    "        if self.pooling == \"mean\":\n",
    "            pooled = encoding.mean(0)\n",
    "        elif self.pooling == \"max\":\n",
    "            pooled = encoding.max(0)[0]\n",
    "        else:\n",
    "            raise Exception(\"pooling {0} not recognized!\".format(self.pooling))\n",
    "        output = self.post_pooling_Net(pooled.unsqueeze(0))\n",
    "        if self.struct_param_post_logvar is None:\n",
    "            return output\n",
    "        else:\n",
    "            logvar = self.post_pooling_logvar_Net(pooled.unsqueeze(0))\n",
    "            return output, logvar\n",
    "    \n",
    "    def forward_inputs(self, X, y):\n",
    "        \"\"\"Convenience wrapper: encode the column-wise concatenation of X and y.\"\"\"\n",
    "        return self(torch.cat([X, y], 1))\n",
    "    \n",
    "\n",
    "    def get_regularization(self, source = [\"weight\", \"bias\"], mode = \"L1\"):\n",
    "        \"\"\"Sum of the sub-networks' regularization terms.\"\"\"\n",
    "        reg = self.encoding_statistics_Net.get_regularization(source = source, mode = mode) + \\\n",
    "              self.post_pooling_Net.get_regularization(source = source, mode = mode)\n",
    "        if self.struct_param_post_logvar is not None:\n",
    "            reg = reg + self.post_pooling_logvar_Net.get_regularization(source = source, mode = mode)\n",
    "        return reg\n",
    "\n",
    "\n",
    "class Generative_Net(nn.Module):\n",
    "    \"\"\"Hypernetwork that generates the weights and biases of a target network\n",
    "    from a (possibly learnable) latent parameter vector.\n",
    "\n",
    "    Layer i of the target network gets its weight from the MLP W_gen_i and its\n",
    "    bias from b_gen_i, both driven by the latent parameter (optionally\n",
    "    concatenated with a learnable context vector).\n",
    "\n",
    "    NOTE(review): this class calls project helpers (load_model_dict,\n",
    "    get_optimizer, get_activation, plot_matrices) that are not imported in\n",
    "    this notebook -- confirm they are in scope at call time.\n",
    "    \"\"\"\n",
    "    def __init__(\n",
    "        self, \n",
    "        input_size,\n",
    "        W_struct_param_list,\n",
    "        b_struct_param_list, \n",
    "        num_context_neurons = 0, \n",
    "        settings_generative = {\"activation\": \"leakyRelu\"}, \n",
    "        settings_model = {\"activation\": \"leakyRelu\"}, \n",
    "        learnable_latent_param = False,\n",
    "        last_layer_linear = True,\n",
    "        is_cuda = False,\n",
    "        ):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            input_size: dimension of the latent parameter vector.\n",
    "            W_struct_param_list: struct_param for each weight-generator MLP.\n",
    "            b_struct_param_list: struct_param for each bias-generator MLP.\n",
    "            num_context_neurons: size of an extra learnable context vector\n",
    "                concatenated to the latent parameter (0 disables it).\n",
    "            settings_generative: settings for the generator MLPs.\n",
    "            settings_model: settings (e.g. activation) of the generated network.\n",
    "            learnable_latent_param: if True, latent_param is a trainable nn.Parameter.\n",
    "            last_layer_linear: if True, the generated network's last layer is linear.\n",
    "            is_cuda: whether to move the module to GPU.\n",
    "        \"\"\"\n",
    "        super(Generative_Net, self).__init__()\n",
    "        assert(len(W_struct_param_list) == len(b_struct_param_list))\n",
    "        self.input_size = input_size\n",
    "        self.W_struct_param_list = W_struct_param_list\n",
    "        self.b_struct_param_list = b_struct_param_list\n",
    "        self.num_context_neurons = num_context_neurons\n",
    "        self.settings_generative = settings_generative\n",
    "        self.settings_model = settings_model\n",
    "        self.learnable_latent_param = learnable_latent_param\n",
    "        self.last_layer_linear = last_layer_linear\n",
    "        self.is_cuda = is_cuda\n",
    "\n",
    "        # One weight generator and one bias generator per target-network layer:\n",
    "        for i, W_struct_param in enumerate(self.W_struct_param_list):\n",
    "            setattr(self, \"W_gen_{0}\".format(i), MLP(input_size = self.input_size + num_context_neurons, struct_param = W_struct_param, settings = self.settings_generative, is_cuda = is_cuda))\n",
    "            setattr(self, \"b_gen_{0}\".format(i), MLP(input_size = self.input_size + num_context_neurons, struct_param = self.b_struct_param_list[i], settings = self.settings_generative, is_cuda = is_cuda))\n",
    "        # Setting up latent param and context param:\n",
    "        self.latent_param = nn.Parameter(torch.randn(1, self.input_size)) if learnable_latent_param else None\n",
    "        if self.num_context_neurons > 0:\n",
    "            self.context = nn.Parameter(torch.randn(1, self.num_context_neurons))\n",
    "        if self.is_cuda:\n",
    "            self.cuda()\n",
    "\n",
    "    @property\n",
    "    def model_dict(self):\n",
    "        \"\"\"Serializable snapshot of the module (keys consumed by load_model_dict).\"\"\"\n",
    "        model_dict = {\"type\": \"Generative_Net\"}\n",
    "        model_dict[\"input_size\"] = self.input_size\n",
    "        model_dict[\"W_struct_param_list\"] = self.W_struct_param_list\n",
    "        model_dict[\"b_struct_param_list\"] = self.b_struct_param_list\n",
    "        model_dict[\"num_context_neurons\"] = self.num_context_neurons\n",
    "        model_dict[\"settings_generative\"] = self.settings_generative\n",
    "        model_dict[\"settings_model\"] = self.settings_model\n",
    "        model_dict[\"learnable_latent_param\"] = self.learnable_latent_param\n",
    "        model_dict[\"last_layer_linear\"] = self.last_layer_linear\n",
    "        for i, W_struct_param in enumerate(self.W_struct_param_list):\n",
    "            model_dict[\"W_gen_{0}\".format(i)] = getattr(self, \"W_gen_{0}\".format(i)).model_dict\n",
    "            model_dict[\"b_gen_{0}\".format(i)] = getattr(self, \"b_gen_{0}\".format(i)).model_dict\n",
    "        if self.latent_param is None:\n",
    "            model_dict[\"latent_param\"] = None\n",
    "        else:\n",
    "            model_dict[\"latent_param\"] = self.latent_param.cpu().data.numpy() if self.is_cuda else self.latent_param.data.numpy()\n",
    "        if hasattr(self, \"context\"):\n",
    "            model_dict[\"context\"] = self.context.data.numpy() if not self.is_cuda else self.context.cpu().data.numpy()\n",
    "        return model_dict\n",
    "    \n",
    "    def set_latent_param_learnable(self, mode):\n",
    "        \"\"\"Turn training of the latent parameter 'on' or 'off'.\"\"\"\n",
    "        if mode == \"on\":\n",
    "            if not self.learnable_latent_param:\n",
    "                self.learnable_latent_param = True\n",
    "                if self.latent_param is None:\n",
    "                    self.latent_param = nn.Parameter(torch.randn(1, self.input_size))\n",
    "                else:\n",
    "                    self.latent_param = nn.Parameter(self.latent_param.data)\n",
    "            else:\n",
    "                assert isinstance(self.latent_param, nn.Parameter)\n",
    "        elif mode == \"off\":\n",
    "            if self.learnable_latent_param:\n",
    "                assert isinstance(self.latent_param, nn.Parameter)\n",
    "                self.learnable_latent_param = False\n",
    "                self.latent_param = Variable(self.latent_param.data, requires_grad = False)\n",
    "            else:\n",
    "                assert isinstance(self.latent_param, Variable) or self.latent_param is None\n",
    "        else:\n",
    "            # Fix: a bare `raise` outside an except block only produces an opaque\n",
    "            # RuntimeError; raise one with an explicit message instead.\n",
    "            raise RuntimeError(\"mode {0} not recognized!\".format(mode))\n",
    "\n",
    "    def load_model_dict(self, model_dict):\n",
    "        \"\"\"Rebuild the network from model_dict and adopt its attributes.\"\"\"\n",
    "        new_net = load_model_dict(model_dict, is_cuda = self.is_cuda)\n",
    "        self.__dict__.update(new_net.__dict__)\n",
    "\n",
    "    def init_weights_bias(self, latent_param):\n",
    "        \"\"\"Generate and cache the per-layer weights W_i and biases b_i from latent_param.\"\"\"\n",
    "        if self.num_context_neurons > 0:\n",
    "            latent_param = torch.cat([latent_param, self.context], 1)\n",
    "        for i in range(len(self.W_struct_param_list)):\n",
    "            setattr(self, \"W_{0}\".format(i), (getattr(self, \"W_gen_{0}\".format(i))(latent_param)).squeeze(0))\n",
    "            setattr(self, \"b_{0}\".format(i), getattr(self, \"b_gen_{0}\".format(i))(latent_param))       \n",
    "\n",
    "    def get_weights_bias(self, W_source = None, b_source = None, isplot = False, latent_param = None):\n",
    "        \"\"\"Return the generated weight and bias arrays, optionally regenerating\n",
    "        them from latent_param and plotting them.\n",
    "\n",
    "        NOTE(review): .data.numpy() below fails on CUDA tensors -- confirm this\n",
    "        is only used on CPU models.\n",
    "        \"\"\"\n",
    "        if latent_param is not None:\n",
    "            self.init_weights_bias(latent_param)\n",
    "        W_list = []\n",
    "        b_list = []\n",
    "        if W_source is not None:\n",
    "            for k in range(len(self.W_struct_param_list)):\n",
    "                if W_source == \"core\":\n",
    "                    W = getattr(self, \"W_{0}\".format(k)).data.numpy()\n",
    "                else:\n",
    "                    raise Exception(\"W_source '{0}' not recognized!\".format(W_source))\n",
    "                W_list.append(W)\n",
    "        if b_source is not None:\n",
    "            for k in range(len(self.b_struct_param_list)):\n",
    "                if b_source == \"core\":\n",
    "                    b = getattr(self, \"b_{0}\".format(k)).data.numpy()\n",
    "                else:\n",
    "                    raise Exception(\"b_source '{0}' not recognized!\".format(b_source))\n",
    "                b_list.append(b)\n",
    "        if isplot:\n",
    "            if W_source is not None:\n",
    "                print(\"weight {0}:\".format(W_source))\n",
    "                plot_matrices(W_list)\n",
    "            if b_source is not None:\n",
    "                print(\"bias {0}:\".format(b_source))\n",
    "                plot_matrices(b_list)\n",
    "        return W_list, b_list\n",
    "\n",
    "    \n",
    "    def set_latent_param(self, latent_param):\n",
    "        \"\"\"Copy (if learnable) or replace the stored latent parameter.\"\"\"\n",
    "        assert isinstance(latent_param, Variable), \"The latent_param must be a Variable!\"\n",
    "        if self.learnable_latent_param:\n",
    "            self.latent_param.data.copy_(latent_param.data)\n",
    "        else:\n",
    "            self.latent_param = latent_param\n",
    "    \n",
    "    \n",
    "    def latent_param_quick_learn(self, X, y, validation_data, loss_core = \"huber\", epochs = 10, batch_size = 128, lr = 1e-2, optim_type = \"LBFGS\"):\n",
    "        \"\"\"Quickly fit only the latent parameter to (X, y), recording validation\n",
    "        MSE before training and after each epoch; returns the loss array.\n",
    "        \"\"\"\n",
    "        assert self.learnable_latent_param is True, \"To quick-learn latent_param, you must set learnable_latent_param as True!\"\n",
    "        self.latent_param_optimizer = get_optimizer(optim_type = optim_type, lr = lr, parameters = [self.latent_param])\n",
    "        self.criterion = get_criterion(loss_core)\n",
    "        loss_list = []\n",
    "        X_test, y_test = validation_data\n",
    "        batch_size = min(batch_size, len(X))\n",
    "        if isinstance(X, Variable):\n",
    "            X = X.data\n",
    "        if isinstance(y, Variable):\n",
    "            y = y.data\n",
    "\n",
    "        dataset_train = data_utils.TensorDataset(X, y)\n",
    "        train_loader = data_utils.DataLoader(dataset_train, batch_size = batch_size, shuffle = True)\n",
    "        \n",
    "        y_pred_test = self(X_test)\n",
    "        loss = get_criterion(\"mse\")(y_pred_test, y_test)\n",
    "        # Fix: loss.data[0] raises IndexError for 0-dim tensors on PyTorch >= 0.4;\n",
    "        # use .item() to extract the Python scalar.\n",
    "        loss_list.append(loss.item())\n",
    "        for i in range(epochs):\n",
    "            for batch_idx, (X_batch, y_batch) in enumerate(train_loader):\n",
    "                X_batch = Variable(X_batch)\n",
    "                y_batch = Variable(y_batch)\n",
    "                if optim_type == \"LBFGS\":\n",
    "                    # LBFGS needs a closure that re-evaluates the loss:\n",
    "                    def closure():\n",
    "                        self.latent_param_optimizer.zero_grad()\n",
    "                        y_pred = self(X_batch)\n",
    "                        loss = self.criterion(y_pred, y_batch)\n",
    "                        loss.backward()\n",
    "                        return loss\n",
    "                    self.latent_param_optimizer.step(closure)\n",
    "                else:\n",
    "                    self.latent_param_optimizer.zero_grad()\n",
    "                    y_pred = self(X_batch)\n",
    "                    loss = self.criterion(y_pred, y_batch)\n",
    "                    loss.backward()\n",
    "                    self.latent_param_optimizer.step()\n",
    "            y_pred_test = self(X_test)\n",
    "            loss = get_criterion(\"mse\")(y_pred_test, y_test)\n",
    "            loss_list.append(loss.item())\n",
    "        loss_list = np.array(loss_list)\n",
    "        return loss_list\n",
    "\n",
    "\n",
    "    def forward(self, input, latent_param = None):\n",
    "        \"\"\"Generate the target network from latent_param and apply it to input.\"\"\"\n",
    "        if latent_param is None:\n",
    "            latent_param = self.latent_param\n",
    "        self.init_weights_bias(latent_param)\n",
    "        output = input\n",
    "        for i in range(len(self.W_struct_param_list)):\n",
    "            output = torch.matmul(output, getattr(self, \"W_{0}\".format(i))) + getattr(self, \"b_{0}\".format(i))\n",
    "            # The last layer is optionally linear; others use the model activation:\n",
    "            if i == len(self.W_struct_param_list) - 1 and hasattr(self, \"last_layer_linear\") and self.last_layer_linear:\n",
    "                activation = \"linear\"\n",
    "            else:\n",
    "                activation = self.settings_model[\"activation\"] if \"activation\" in self.settings_model else \"leakyRelu\"\n",
    "            output = get_activation(activation)(output)\n",
    "        return output\n",
    "\n",
    "\n",
    "    def get_regularization(self, source = [\"weight\", \"bias\"], mode = \"L1\"):\n",
    "        \"\"\"L1 regularization over generated weights/biases and/or the generator MLPs.\"\"\"\n",
    "        reg = Variable(torch.FloatTensor(np.array([0])), requires_grad = False)\n",
    "        if self.is_cuda:\n",
    "            reg = reg.cuda()\n",
    "        for reg_type in source:\n",
    "            if reg_type == \"weight\":\n",
    "                for i in range(len(self.W_struct_param_list)):\n",
    "                    if mode == \"L1\":\n",
    "                        reg = reg + getattr(self, \"W_{0}\".format(i)).abs().sum()\n",
    "                    else:\n",
    "                        # Fix: replace bare `raise` (opaque RuntimeError) with an explicit error.\n",
    "                        raise RuntimeError(\"mode {0} not recognized!\".format(mode))\n",
    "            elif reg_type == \"bias\":\n",
    "                for i in range(len(self.W_struct_param_list)):\n",
    "                    if mode == \"L1\":\n",
    "                        reg = reg + getattr(self, \"b_{0}\".format(i)).abs().sum()\n",
    "                    else:\n",
    "                        # Fix: replace bare `raise` (opaque RuntimeError) with an explicit error.\n",
    "                        raise RuntimeError(\"mode {0} not recognized!\".format(mode))\n",
    "            elif reg_type == \"W_gen\":\n",
    "                for i in range(len(self.W_struct_param_list)):\n",
    "                    reg = reg + getattr(self, \"W_gen_{0}\".format(i)).get_regularization(source = source, mode = mode)\n",
    "            elif reg_type == \"b_gen\":\n",
    "                for i in range(len(self.W_struct_param_list)):\n",
    "                    reg = reg + getattr(self, \"b_gen_{0}\".format(i)).get_regularization(source = source, mode = mode)\n",
    "            else:\n",
    "                raise Exception(\"source {0} not recognized!\".format(reg_type))\n",
    "        return reg"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
