{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import numpy as np\n",
    "import math\n",
    "\n",
    "import torchvision.transforms as transforms\n",
    "from torchvision.utils import save_image\n",
    "\n",
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "from torchvision import datasets\n",
    "from torch.autograd import Variable\n",
    "\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch\n",
    "from matplotlib import pyplot as plt\n",
    "import time\n",
    "\n",
    "import pylab as py\n",
    "import seaborn as sns\n",
    "import scipy.io as spio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# set your device\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "print(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Architecture 1\n",
    "class PGNN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(PGNN, self).__init__()\n",
    "\n",
    "        def block(in_feat, out_feat, normalize=True):\n",
    "            layers = [nn.Linear(in_feat, out_feat)]\n",
    "            if normalize:\n",
    "                layers.append(nn.BatchNorm1d(out_feat, 0.8))\n",
    "            layers.append(nn.LeakyReLU(0.2, inplace=True))\n",
    "            layers.append(nn.Dropout(0.2))\n",
    "            return layers\n",
    "        \n",
    "        self.model = nn.Sequential(\n",
    "            *block(data_dim, 40, normalize=False),\n",
    "            *block(40, 40),\n",
    "            *block(40, 40),\n",
    "            *block(40, 40),\n",
    "            *block(40, 40),\n",
    "            nn.Linear(40, out_dim)\n",
    "        )\n",
    "\n",
    "    def forward(self, data):\n",
    "        out = self.model(data)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def physics_loss(x, y, stat_x = [0,1], stat_y = [0,1]):  #stat [0] = mean, stat [1] = std\n",
    "    stat_x = torch.Tensor(stat_x).to(device)\n",
    "    stat_y = torch.Tensor(stat_y).to(device)\n",
    "    x = x * stat_x[1] + stat_x[0]\n",
    "    y = y * stat_y[1] + stat_y[0]\n",
    "    energy_loss =   - 0.5*x[:,4:5]*torch.pow(x[:,0:1],2) \\\n",
    "                    - 0.5*x[:,5:6]*torch.pow(x[:,1:2],2) \\\n",
    "                    + 0.5*x[:,4:5]*torch.pow(y[:,0:1],2) \\\n",
    "                    + 0.5*x[:,5:6]*torch.pow(y[:,1:2],2)\n",
    "                  \n",
    "    \n",
    "    momentum_loss =  - x[:,4:5] * x[:,0:1] \\\n",
    "                     - x[:,5:6] * x[:,1:2] \\\n",
    "                     + x[:,4:5] * y[:,0:1] \\\n",
    "                     + x[:,5:6] * y[:,1:2] \n",
    "    f = torch.cat([energy_loss, momentum_loss], dim = 1)\n",
    "    return f"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_lambda(adaptive_lambda, phy_loss, loss, net, beta = 0.9):\n",
    "    \"\"\"Update the adaptive weight balancing the data loss against the physics loss.\n",
    "\n",
    "    For every nn.Linear layer in net.model, collects the max |d(phy_loss)/dW|\n",
    "    and the mean |d(loss)/dW|; the ratio (max over layers / mean over layers)\n",
    "    is the new candidate weight, blended into the running value with beta.\n",
    "\n",
    "    NOTE(review): with beta = 0.9 the blend (1-beta)*old + beta*new is\n",
    "    dominated by the *new* ratio every call -- confirm this EMA direction is\n",
    "    intended (annealing schemes usually weight the running value).\n",
    "    \"\"\"\n",
    "    phyloss_layer = []\n",
    "    loss_layer = []\n",
    "    # no_grad only stops recording of *new* graph ops; autograd.grad still\n",
    "    # works here because it differentiates the already-built graphs of\n",
    "    # phy_loss / loss (retain_graph keeps them alive for the later backward).\n",
    "    with torch.no_grad():\n",
    "        for layer in net.model.children():\n",
    "            if isinstance(layer, nn.Linear):\n",
    "                phyloss_layer.append(torch.abs(torch.autograd.grad(phy_loss, layer.weight, retain_graph = True)[0]).max())\n",
    "                loss_layer.append(torch.abs(torch.autograd.grad(loss, layer.weight, retain_graph = True)[0]).mean())\n",
    "    max_grad_res = torch.stack(phyloss_layer).max()     \n",
    "    mean_grad_loss = torch.stack(loss_layer).mean()\n",
    "    lambda_new = max_grad_res / mean_grad_loss\n",
    "    # exponential blend of old weight and new ratio (see NOTE above)\n",
    "    adaptive_lambda = (1 - beta) * adaptive_lambda + beta * lambda_new\n",
    "    return adaptive_lambda"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def uncertainity_estimate(x, model, num_samples, stat = [0,1]):\n",
    "    outputs = np.stack([model(x).cpu().detach().numpy()*stat[1]+stat[0] for i in range(num_samples)], axis = 0) # n번 inference, output.shape = [20, N]\n",
    "    y_mean = outputs.mean(axis=0)\n",
    "    y_variance = outputs.var(axis=0)\n",
    "    y_std = np.sqrt(y_variance)\n",
    "    return y_mean, y_std"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ---- Data loading, train/test split, and per-feature normalization ----\n",
    "# NOTE(review): the dataset path is hardcoded and relative to this notebook.\n",
    "\n",
    "tr_frac = 0.8  # fraction of rows used for training\n",
    "## load data; the last two columns are the regression targets\n",
    "data = np.loadtxt( '../../datasets/collision_shuffled.txt' )\n",
    "labels = data[:,-2:]\n",
    "x = data[:,:-2]\n",
    "\n",
    "# training and test splits (rows presumably pre-shuffled -- see filename)\n",
    "n_obs = int(tr_frac * x.shape[0])\n",
    "train_x , train_y = x[:n_obs,:] , labels[:n_obs,:] \n",
    "test_x , test_y = x[n_obs:,:] , labels[n_obs:,:] \n",
    "\n",
    "# Normalization: z-score inputs and targets with *training* statistics only,\n",
    "# applied to both splits (avoids test-set leakage).\n",
    "\n",
    "# train_x:\n",
    "mean_x = train_x.mean(axis=0)\n",
    "std_x = train_x.std(axis=0)\n",
    "\n",
    "train_x = (train_x-mean_x)/std_x\n",
    "test_x = (test_x-mean_x)/std_x\n",
    "\n",
    "# train_y:\n",
    "mean_y = train_y.mean(axis=0)\n",
    "std_y = train_y.std(axis=0)\n",
    "\n",
    "train_y = (train_y-mean_y)/std_y\n",
    "test_y = (test_y-mean_y)/std_y\n",
    "\n",
    "# network input/output sizes, read by PGNN.__init__ via module globals\n",
    "#         noise_dim = 2\n",
    "data_dim = train_x.shape[-1]\n",
    "out_dim = labels.shape[-1]\n",
    "\n",
    "#defining batch size and parameters\n",
    "batch_size = 64 # mini-batch size\n",
    "num_workers = 4 # NOTE(review): defined but never passed to DataLoader below\n",
    "shuffle = True # shuffle the dataset\n",
    "\n",
    "\n",
    " #training and testing dataset creation (tensors moved to the device up front)\n",
    "train_x = torch.FloatTensor(train_x).to(device)\n",
    "test_x = torch.FloatTensor(test_x).to(device)\n",
    "train_y = torch.FloatTensor(train_y).to(device)\n",
    "test_y = torch.FloatTensor(test_y).to(device)\n",
    "\n",
    "train_loader = DataLoader(list(zip(train_x,train_y)), batch_size=batch_size, shuffle=shuffle)\n",
    "\n",
    "# Physics-loss collocation inputs: ALL inputs (train + test).  Labels are\n",
    "# not needed here -- the physics residual only uses x and the prediction.\n",
    "x_f = torch.cat([train_x, test_x], dim = 0)\n",
    "#         print(x_f.shape)\n",
    "net = PGNN().to(device)\n",
    "\n",
    "net_optimizer = torch.optim.Adam(net.parameters(), lr=1e-3, betas = (0.5, 0.999))\n",
    "\n",
    "num_epochs = 5000\n",
    "lambda_mse = 1  # adaptive weight on the MSE term, updated by find_lambda each step\n",
    "#         lambda_phy = 0.001\n",
    "# Adv_loss = np.zeros(num_epochs)\n",
    "\n",
    "# per-epoch averages of each loss term, plotted after training\n",
    "MSE_loss = np.zeros(num_epochs)\n",
    "PHY_loss = np.zeros(num_epochs)\n",
    "TOT_loss = np.zeros(num_epochs)\n",
    "\n",
    "\n",
    "# NOTE(review): these two buffers are allocated but never written or read\n",
    "train_pred = np.zeros((num_epochs,train_y.shape[0]))\n",
    "test_pred = np.zeros((num_epochs,test_y.shape[0]))\n",
    "\n",
    "# ---- Training loop: supervised MSE + physics residual, adaptively weighted ----\n",
    "for epoch in range(num_epochs):\n",
    "    epoch_loss = 0  # NOTE(review): never accumulated or read -- effectively unused\n",
    "    for i, (x, y) in enumerate(train_loader):\n",
    "\n",
    "        net_optimizer.zero_grad()\n",
    "        # supervised prediction on the labeled mini-batch\n",
    "        y_pred = net.forward(x)\n",
    "\n",
    "        # prediction on ALL collocation inputs for the (label-free) physics term\n",
    "        y_f = net.forward(x_f)\n",
    "\n",
    "        # physics residual is computed in physical units, hence the stats\n",
    "        phy_loss = torch.mean(torch.abs(physics_loss(x_f, y_f, [mean_x, std_x], [mean_y, std_y])))\n",
    "        mse_loss = torch.nn.functional.mse_loss(y_pred, y)\n",
    "\n",
    "        # rebalance the MSE weight from the current gradient magnitudes\n",
    "        lambda_mse = find_lambda(lambda_mse, phy_loss, lambda_mse * mse_loss, net)\n",
    "\n",
    "        loss = lambda_mse * mse_loss + phy_loss\n",
    "        loss.backward()\n",
    "        net_optimizer.step()\n",
    "\n",
    "        # accumulate per-batch values; averaged over batches after the loop\n",
    "        MSE_loss[epoch] += mse_loss.detach().cpu().numpy()\n",
    "        PHY_loss[epoch] += phy_loss.detach().cpu().numpy()\n",
    "        TOT_loss[epoch] += loss.detach().cpu().numpy()\n",
    "\n",
    "    MSE_loss[epoch] = MSE_loss[epoch] / len(train_loader)\n",
    "    PHY_loss[epoch] = PHY_loss[epoch] / len(train_loader)\n",
    "    TOT_loss[epoch] = TOT_loss[epoch] / len(train_loader)\n",
    "\n",
    "    # progress report every 100 epochs\n",
    "    if (epoch % 100 == 0):\n",
    "        print(\n",
    "            \"[Epoch %d/%d] [MSE loss: %f] [Phy loss: %f] [Total loss: %f] [Lambda mse: %f]\"\n",
    "            % (epoch, num_epochs, MSE_loss[epoch], PHY_loss[epoch], TOT_loss[epoch], lambda_mse )\n",
    "        )\n",
    "\n",
    "\n",
    "\n",
    "# ###############################################################################################\n",
    "# ######################################## LOSS PLOTS ###########################################\n",
    "\n",
    "plt.figure(figsize=(10,10))\n",
    "plt.plot(MSE_loss)\n",
    "plt.plot(PHY_loss)\n",
    "plt.plot(TOT_loss)\n",
    "plt.legend(['MSE_loss','PHY_loss','Total_loss'])\n",
    "plt.show()\n",
    "\n",
    "# ###############################################################################################\n",
    "\n",
    "\n",
    "# ###############################################################################################\n",
    "# ################################## TEST PREDICTIONS ###########################################\n",
    "# MC-dropout sampling: `net` was never switched to eval(), so its Dropout\n",
    "# layers are still stochastic here -- that is what makes the std non-zero.\n",
    "# NOTE(review): BatchNorm also stays in train mode during these 10000 passes\n",
    "# (it keeps updating its running statistics); confirm this is intentional.\n",
    "n_samples = 10000  # number of stochastic forward passes\n",
    "test_mean_y, test_std_y = uncertainity_estimate(test_x, net, n_samples, [mean_y, std_y])\n",
    "\n",
    "# per-output mean prediction (already denormalized to physical units)\n",
    "test_mean_y_0 = test_mean_y[:,0]\n",
    "test_mean_y_1 = test_mean_y[:,1]\n",
    "\n",
    "test_std_y_0 = test_std_y[:,0]\n",
    "test_std_y_1 = test_std_y[:,1]\n",
    "\n",
    "# ground truth, denormalized back to physical units\n",
    "test_y_true_0 = test_y[:, 0].detach().cpu().numpy()*std_y[0] + mean_y[0]\n",
    "test_y_true_1 = test_y[:, 1].detach().cpu().numpy()*std_y[1] + mean_y[1]\n",
    "\n",
    "# sample index axis for the prediction plots (reuses the name `x`)\n",
    "x = np.linspace(0, test_y.shape[0]-1, test_y.shape[0])\n",
    "\n",
    "\n",
    "# predictions vs. truth for output 0, with a +/- 2 std band\n",
    "plt.figure(figsize=(20,7))\n",
    "plt.plot(x, test_mean_y_0 , label = 'test predictions', alpha= 0.9, color='b', marker='+')\n",
    "plt.fill_between(x, test_mean_y_0-2*test_std_y_0, test_mean_y_0+2*test_std_y_0, alpha=0.2, color='b')\n",
    "# plt.errorbar(x,test_mean_y_0,test_std_y_0)\n",
    "plt.plot(x, test_y_true_0, label = 'ground truth', alpha=1, color='r', marker='*')\n",
    "py.legend(loc='upper right')\n",
    "plt.show()\n",
    "\n",
    "# same plot for output 1\n",
    "plt.figure(figsize=(20,7))\n",
    "plt.plot(x, test_mean_y_1 , label = 'test predictions', alpha= 0.9, color='b', marker='+')\n",
    "plt.fill_between(x, test_mean_y_1-2*test_std_y_1, test_mean_y_1+2*test_std_y_1, alpha=0.2, color='b')\n",
    "# plt.errorbar(x,mean_y,std_y)\n",
    "plt.plot(x, test_y_true_1, label = 'ground truth', alpha=1, color='r', marker='*')\n",
    "py.legend(loc='upper right')\n",
    "plt.show()\n",
    "\n",
    "# denormalize the test inputs (in place) for the physics checks below\n",
    "test_x = test_x.detach().cpu().numpy()\n",
    "test_x = test_x * std_x + mean_x\n",
    "\n",
    "# per-output RMSE in physical units\n",
    "test_rmse0 = (((test_mean_y_0.flatten() - test_y_true_0.flatten())**2).mean())**0.5\n",
    "test_rmse1 = (((test_mean_y_1.flatten() - test_y_true_1.flatten())**2).mean())**0.5\n",
    "# test_rmse = (((np.stack(test_mean_y_0,test_mean_y_1) - test_y_true_1.flatten())**2).mean())**0.5\n",
    "\n",
    "# mean absolute energy/momentum residuals of the *predictions* (same algebra\n",
    "# as physics_loss, columns 4-5 acting as masses)\n",
    "energy_loss = np.mean(np.absolute(0.5*test_x[:,4]*np.power(test_x[:,0],2)+0.5*test_x[:,5]*np.power(test_x[:,1],2)\n",
    "                                  -0.5*test_x[:,4]*np.power(test_mean_y_0,2)-0.5*test_x[:,5]*np.power(test_mean_y_1,2)))\n",
    "\n",
    "momentum_loss = np.mean(np.absolute(test_x[:,4]*test_x[:,0] + test_x[:,5]*test_x[:,1] \n",
    "                                     - test_x[:,4]*test_mean_y_0 - test_x[:,5]*test_mean_y_1))\n",
    "test_phy_loss = (energy_loss + momentum_loss)\n",
    "\n",
    "# combined RMSE over both outputs (interleaved, then flattened)\n",
    "test_true = np.stack((test_y_true_0, test_y_true_1), axis =-1).flatten()\n",
    "test_mean_y = np.stack((test_mean_y_0, test_mean_y_1), axis =-1).flatten()\n",
    "test_rmse = ((( test_mean_y - test_true)**2).mean())**0.5\n",
    "\n",
    "print(\"test RMSE = %f\" %(test_rmse))\n",
    "print(\"test RMSE va = %f\" %(test_rmse0))\n",
    "print(\"test RMSE vb = %f\" %(test_rmse1))\n",
    "print(\"test Physical Inconsistency = %f\" %(test_phy_loss))\n",
    "\n",
    "# baseline: the same physics residuals evaluated on the ground-truth labels\n",
    "energy_loss_true = np.mean(np.absolute(0.5*test_x[:,4]*np.power(test_x[:,0],2)\n",
    "                                       +0.5*test_x[:,5]*np.power(test_x[:,1],2)-0.5*test_x[:,4]*np.power(test_y_true_0,2)\n",
    "                                       -0.5*test_x[:,5]*np.power(test_y_true_1,2)))\n",
    "\n",
    "momentum_loss_true = np.mean(np.absolute(test_x[:,4]*test_x[:,0] + test_x[:,5]*test_x[:,1] \n",
    "                                     - test_x[:,4]*test_y_true_0 - test_x[:,5]*test_y_true_1))\n",
    "test_phy_loss_true = (energy_loss_true + momentum_loss_true)\n",
    "print(\"test Physical Inconsistency (TRUE)= %f\" %(test_phy_loss_true))\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
