{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "C:\\Users\\xkc\\Desktop\\benchmark_xkc\n"
     ]
    }
   ],
   "source": [
    "cd C:\\Users\\xkc\\Desktop\\benchmark_xkc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Implements dataloaders for generic MIMIC tasks.\"\"\"\n",
    "from tqdm import tqdm\n",
    "import sys\n",
    "import os\n",
    "import numpy as np\n",
    "from torch.utils.data import DataLoader\n",
    "import random\n",
    "import pickle\n",
    "import copy\n",
    "sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "task = 7; batch_size = 40; num_workers = 1; train_shuffle = True; imputed_path = 'data/MIMIC_III/im.pk'; flatten_time_series = False; tabular_robust = True; timeseries_robust = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"Get dataloaders for MIMIC dataset.\\n\\nArgs:\\n    task (int): Integer between -1 and 19 inclusive, -1 means mortality task, 0-19 means icd9 task.\\n    batch_size (int, optional): Batch size. Defaults to 40.\\n    num_workers (int, optional): Number of workers to load data in. Defaults to 1.\\n    train_shuffle (bool, optional): Whether to shuffle training data or not. Defaults to True.\\n    imputed_path (str, optional): Datafile location. Defaults to 'im.pk'.\\n    flatten_time_series (bool, optional): Whether to flatten time series data or not. Defaults to False.\\n    tabular_robust (bool, optional): Whether to apply tabular robustness as dataset augmentation or not. Defaults to True.\\n    timeseries_robust (bool, optional): Whether to apply timeseries robustness noises as dataset augmentation or not. Defaults to True.\\n\\nReturns:\\n    tuple: Tuple of training dataloader, validation dataloader, and test dataloader\\n\""
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Reference docstring for the parameters configured above (informational only;\n",
     "# the cell's value is just this string).\n",
     "\"\"\"Get dataloaders for MIMIC dataset.\n",
     "\n",
     "Args:\n",
     "    task (int): Integer between -1 and 19 inclusive, -1 means mortality task, 0-19 means icd9 task.\n",
     "    batch_size (int, optional): Batch size. Defaults to 40.\n",
     "    num_workers (int, optional): Number of workers to load data in. Defaults to 1.\n",
     "    train_shuffle (bool, optional): Whether to shuffle training data or not. Defaults to True.\n",
     "    imputed_path (str, optional): Datafile location. Defaults to 'im.pk'.\n",
     "    flatten_time_series (bool, optional): Whether to flatten time series data or not. Defaults to False.\n",
     "    tabular_robust (bool, optional): Whether to apply tabular robustness as dataset augmentation or not. Defaults to True.\n",
     "    timeseries_robust (bool, optional): Whether to apply timeseries robustness noises as dataset augmentation or not. Defaults to True.\n",
     "\n",
     "Returns:\n",
     "    tuple: Tuple of training dataloader, validation dataloader, and test dataloader\n",
     "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "f = open(imputed_path, 'rb')\n",
    "datafile = pickle.load(f)\n",
    "f.close()\n",
    "X_t = datafile['ep_tdata']  # 时间序列数据\n",
    "X_s = datafile['adm_features_all']  # 静态（表格）数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 处理 NaN 或 无穷大（Inf） 数据，防止异常值影响训练\n",
    "X_t[np.isinf(X_t)] = 0\n",
    "X_t[np.isnan(X_t)] = 0\n",
    "X_s[np.isinf(X_s)] = 0\n",
    "X_s[np.isnan(X_s)] = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "#计算 均值 (avg) 和 标准差 (std)，用于标准化数据\n",
    "X_s_avg = np.average(X_s, axis=0)\n",
    "X_s_std = np.std(X_s, axis=0)\n",
    "X_t_avg = np.average(X_t, axis=(0, 1))\n",
    "X_t_std = np.std(X_t, axis=(0, 1))\n",
    "\n",
    "for i in range(len(X_s)):\n",
    "    X_s[i] = (X_s[i]-X_s_avg)/X_s_std\n",
    "    for j in range(len(X_t[0])):\n",
    "        X_t[i][j] = (X_t[i][j]-X_t_avg)/X_t_std"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2.72916201e+04, 8.64606328e-03, 2.73792004e-02, 5.97252882e-02,\n",
       "       4.80929605e-01])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "X_s_avg  # sanity check: per-feature means of the static (tabular) data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 14.55921042, 120.06630195,  85.20360087,  36.7675322 ,\n",
       "         1.81327318, 155.80676542,  23.91229671,  12.16554312,\n",
       "        23.7701156 , 138.44216267,   4.13967805,   0.85091724])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "X_t_avg  # sanity check: per-feature means of the time-series data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "static_dim = len(X_s[0])\n",
    "timestep = len(X_t[0])\n",
    "series_dim = len(X_t[0][0])\n",
    "if flatten_time_series:\n",
    "    X_t = X_t.reshape(len(X_t), timestep*series_dim)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "if task < 0:\n",
    "    y = datafile['adm_labels_all'][:, 1]\n",
    "    admlbl = datafile['adm_labels_all']\n",
    "    le = len(y)\n",
    "    for i in range(0, le):\n",
    "        if admlbl[i][1] > 0:\n",
    "            y[i] = 1\n",
    "        elif admlbl[i][2] > 0:\n",
    "            y[i] = 2\n",
    "        elif admlbl[i][3] > 0:\n",
    "            y[i] = 3\n",
    "        elif admlbl[i][4] > 0:\n",
    "            y[i] = 4\n",
    "        elif admlbl[i][5] > 0:\n",
    "            y[i] = 5\n",
    "        else:\n",
    "            y[i] = 0\n",
    "else:\n",
    "    y = datafile['y_icd9'][:, task]\n",
    "    le = len(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 列表（表格数据、时序数据、标签）\n",
    "datasets = [(X_s[i], X_t[i], y[i]) for i in range(le)]\n",
    "\n",
    "random.seed(10)\n",
    "\n",
    "random.shuffle(datasets)\n",
    "\n",
    "valids = DataLoader(datasets[0:le//10], shuffle=False,\n",
    "                    num_workers=num_workers, batch_size=batch_size)\n",
    "trains = DataLoader(datasets[le//5:], shuffle=train_shuffle,\n",
    "                    num_workers=num_workers, batch_size=batch_size)\n",
    "\n",
    "tests = DataLoader(datasets[0:le//5], shuffle=False, num_workers=num_workers, batch_size=batch_size)  # 20% 测试集\n",
    "valids = DataLoader(datasets[le//5:le//4], shuffle=False, num_workers=num_workers, batch_size=batch_size)  # 5% 验证集\n",
    "trains = DataLoader(datasets[le//4:], shuffle=train_shuffle, num_workers=num_workers, batch_size=batch_size)  # 75% 训练集\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(<torch.utils.data.dataloader.DataLoader at 0x22595c83c10>,\n",
       " <torch.utils.data.dataloader.DataLoader at 0x225c5d88340>,\n",
       " <torch.utils.data.dataloader.DataLoader at 0x225c5c752e0>)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "trains, valids, tests  # sanity check: the three DataLoader objects exist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "import torch\n",
    "from torch import nn\n",
    "\n",
    "sys.path.append(os.getcwd())\n",
    "\n",
    "from unimodals.common_models import MLP, GRU # noqa\n",
    "from fusions.common_fusions import Concat # noqa\n",
    "from training_structures.Supervised_Learning import train, test # noqa"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "traindata, validdata, testdata = trains, valids, tests"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0 train loss: tensor(0.6307, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0 valid loss: tensor(0.6014, device='cuda:0') acc: 0.680564071122011\n",
      "Saving Best\n",
      "AUPRC: 0.7377403074352069\n",
      "Epoch 1 train loss: tensor(0.6071, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1 valid loss: tensor(0.6010, device='cuda:0') acc: 0.6695278969957081\n",
      "AUPRC: 0.7501216990998087\n",
      "Epoch 2 train loss: tensor(0.5998, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2 valid loss: tensor(0.5941, device='cuda:0') acc: 0.688534641324341\n",
      "Saving Best\n",
      "AUPRC: 0.7480422631811952\n",
      "Epoch 3 train loss: tensor(0.5966, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3 valid loss: tensor(0.5920, device='cuda:0') acc: 0.6916002452483139\n",
      "Saving Best\n",
      "AUPRC: 0.7565605153021964\n",
      "Epoch 4 train loss: tensor(0.5927, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4 valid loss: tensor(0.6047, device='cuda:0') acc: 0.6732066217044758\n",
      "AUPRC: 0.7494454551546812\n",
      "Epoch 5 train loss: tensor(0.5883, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5 valid loss: tensor(0.5904, device='cuda:0') acc: 0.6879215205395462\n",
      "AUPRC: 0.7599883765404218\n",
      "Epoch 6 train loss: tensor(0.5859, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 6 valid loss: tensor(0.5877, device='cuda:0') acc: 0.677498467198038\n",
      "AUPRC: 0.761689505703613\n",
      "Epoch 7 train loss: tensor(0.5819, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 7 valid loss: tensor(0.5939, device='cuda:0') acc: 0.688534641324341\n",
      "AUPRC: 0.765975788179235\n",
      "Epoch 8 train loss: tensor(0.5791, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 8 valid loss: tensor(0.5921, device='cuda:0') acc: 0.6866952789699571\n",
      "AUPRC: 0.7626055675222123\n",
      "Epoch 9 train loss: tensor(0.5762, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 9 valid loss: tensor(0.5960, device='cuda:0') acc: 0.683629675045984\n",
      "AUPRC: 0.7508373350669264\n",
      "Epoch 10 train loss: tensor(0.5738, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 10 valid loss: tensor(0.5909, device='cuda:0') acc: 0.683629675045984\n",
      "AUPRC: 0.7595893977819208\n",
      "Epoch 11 train loss: tensor(0.5704, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 11 valid loss: tensor(0.5890, device='cuda:0') acc: 0.6916002452483139\n",
      "AUPRC: 0.762518758787923\n",
      "Epoch 12 train loss: tensor(0.5680, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 12 valid loss: tensor(0.5969, device='cuda:0') acc: 0.674432863274065\n",
      "AUPRC: 0.7531500685521655\n",
      "Epoch 13 train loss: tensor(0.5657, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 13 valid loss: tensor(0.5942, device='cuda:0') acc: 0.6873083997547517\n",
      "AUPRC: 0.7560465524971047\n",
      "Epoch 14 train loss: tensor(0.5615, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 14 valid loss: tensor(0.6012, device='cuda:0') acc: 0.6824034334763949\n",
      "AUPRC: 0.7553463501070802\n",
      "Epoch 15 train loss: tensor(0.5587, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 15 valid loss: tensor(0.5998, device='cuda:0') acc: 0.6781115879828327\n",
      "AUPRC: 0.7525764741403278\n",
      "Epoch 16 train loss: tensor(0.5545, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 16 valid loss: tensor(0.6026, device='cuda:0') acc: 0.6824034334763949\n",
      "AUPRC: 0.7544213784431274\n",
      "Epoch 17 train loss: tensor(0.5505, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 17 valid loss: tensor(0.5994, device='cuda:0') acc: 0.6909871244635193\n",
      "AUPRC: 0.7573900744174565\n",
      "Epoch 18 train loss: tensor(0.5468, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 18 valid loss: tensor(0.6035, device='cuda:0') acc: 0.6842427958307786\n",
      "AUPRC: 0.7589262202684646\n",
      "Epoch 19 train loss: tensor(0.5429, device='cuda:0', grad_fn=<DivBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19 valid loss: tensor(0.6177, device='cuda:0') acc: 0.6866952789699571\n",
      "AUPRC: 0.7443738913557494\n",
      "Training Time: 96.03429937362671\n",
      "Training Peak Mem: 1342.96484375\n",
      "Training Params: 33452\n"
     ]
    }
   ],
   "source": [
     "# Build the per-modality encoders, the fusion layer, and the classification head.\n",
     "# MLP(5, 10, 10): the static input has 5 features (matches len(X_s_avg) above).\n",
     "# GRU(12, 30): each timestep carries 12 features (matches len(X_t_avg) above).\n",
     "encoders = [MLP(5, 10, 10, dropout=False).cuda(), GRU(\n",
     "    12, 30, dropout=False, batch_first=True).cuda()]\n",
     "# Head input is 730 -- presumably 10 (MLP out) plus the flattened GRU output; TODO confirm.\n",
     "head = MLP(730, 40, 2, dropout=False).cuda()\n",
     "fusion = Concat().cuda()\n",
     "\n",
     "# Train, saving the best checkpoint ('best.pt', loaded by the test cell below).\n",
     "# NOTE(review): the positional 10 looks like an epoch budget, but the captured output\n",
     "# shows 20 epochs -- confirm the argument's meaning in Supervised_Learning.train.\n",
     "train(encoders, fusion, head, traindata, validdata, 10, auprc=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Testing: \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\xkc\\AppData\\Local\\Temp\\ipykernel_55680\\947551509.py:3: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  model = torch.load('best.pt').cuda()\n",
      "c:\\Users\\xkc\\anaconda3\\envs\\py38\\lib\\site-packages\\torch\\nn\\modules\\module.py:1553: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  return self._call_impl(*args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "AUPRC: 0.7369896277838737\n",
      "acc: 0.6887934999233482\n",
      "Inference Time: 1.8813748359680176\n",
      "Inference Params: 33452\n"
     ]
    }
   ],
   "source": [
    "# test\n",
    "print(\"Testing: \")\n",
    "model = torch.load('best.pt').cuda()\n",
    "# dataset = 'mimic mortality', 'mimic 1', 'mimic 7'\n",
    "test(model, testdata, dataset='mimic 7', auprc=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py38",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
