{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torch.utils.data as Data\n",
    "torch.manual_seed(8) # for reproduce\n",
    "\n",
    "import time\n",
    "import numpy as np\n",
    "import gc\n",
    "import sys\n",
    "sys.setrecursionlimit(50000)\n",
    "import pickle\n",
    "torch.backends.cudnn.benchmark = True\n",
    "# NOTE(review): set_default_tensor_type is deprecated in newer PyTorch and this\n",
    "# setting hard-requires a CUDA-enabled build -- confirm the target environment\n",
    "torch.set_default_tensor_type('torch.cuda.FloatTensor')\n",
    "from tensorboardX import SummaryWriter\n",
    "torch.nn.Module.dump_patches = True\n",
    "import copy\n",
    "import pandas as pd\n",
    "#then import my own modules\n",
    "from AttentiveFP import Fingerprint, Fingerprint_viz, save_smiles_dicts, get_smiles_dicts, get_smiles_array, moltosvg_highlight"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.metrics import matthews_corrcoef\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import r2_score\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from sklearn.metrics import mean_absolute_error\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import precision_recall_curve\n",
    "from sklearn.metrics import auc\n",
    "from sklearn.metrics import f1_score\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from rdkit.Chem import rdMolDescriptors, MolSurf\n",
    "# from rdkit.Chem.Draw import SimilarityMaps\n",
    "from rdkit import Chem\n",
    "# from rdkit.Chem import AllChem\n",
    "from rdkit.Chem import QED\n",
    "%matplotlib inline\n",
    "from numpy.polynomial.polynomial import polyfit\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.cm as cm\n",
    "import matplotlib\n",
    "from IPython.display import SVG, display\n",
    "import seaborn as sns; sns.set(color_codes=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "number of all smiles:  7831\n",
      "number of successfully processed smiles:  7831\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAU8AAAC/CAYAAAB+KF5fAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAEpVJREFUeJzt3X9M1df9x/HXvYKoSC98zS1+4+2q9npvilbpVhVnUzMQ0zZaQ7dsarIN69JuqdNlxbWboXGJ2QwlkgZirDNmzbqma5ZWQslEnZtuapnRadpAvRdsGuM3IihXBN2VH/f7R8fV64X74xTvD30+EpNyPu97OJxyX5z7+dzPuZZAIBAQACAu1mQPAADSEeEJAAYITwAwQHgCgAHCEwAMEJ4AYIDwBAADhCcAGCA8AcAA4QkABghPADBAeAKAAcITAAxkRCs4fvy46uvr9e9//1sXL16UzWbT3Llz9dOf/lRutzuk9ujRo3rzzTf12WefKTs7W6WlpaqoqNADDzwQUtfX16eamhrt27dPPT09cjqdevnll1VSUhL2/WPtM1bd3X0aGoq8kdSUKZN1+XKvUf+ID3OdWMx3OKvVory87LgfZ4m2Jd2GDRvk8/n0zDPP6JFHHlFXV5d2794tr9erP/zhDyosLJQkNTc364UXXlBJSYlWrVqlS5cuqbq6Wg6HQ++++66s1luL3LVr16qlpUUVFRVyOBz68MMP1dDQoJ07d2rJkiXBunj6jNXly71Rw9Nuz1Fn57W4+0b8mOvEYr7DWa0WTZkyOe7HRQ3Py5cva8qUKSFtPT09KikpUVFRkWprayVJ3/nOdzQwMKAPPvggGGpHjx7VCy+8oJqaGj377LOSpMOHD+vFF19UXV2dSktLJUmBQEBr1qyRz+fTX/7yl+D3ibXPeBCeqYW5TizmO5xpeEZdut0ZnJL0wAMP6OGHH9bFixclSR0dHfrkk0+0cuXKkNXg4sWLlZ+fr6ampmDbgQMHlJOTE/IS3WKxqKysTOfOnVNbW1vcfQJAohldMLpy5Yq8Xq9mzZolSfJ4PJIU/Pp2LpdLXq83+LXX65XT6Qx7yT18/nS4r3j6BIBEi3rB6E6BQECVlZUaGhrSunXrJEk+n0+SZLPZwuptNptaWlqCX/t8Pk2fPn3Eutv7iqfPeMS6PLfbc4z6R/yY68RivsdG3OFZVVWlgwcP6re//a0eeeSRkGMWi2XEx9zZPlpdPLWR+ogk0ec8B4Ykf//AqMezMjOUcR+/YYxzcInFfIczPecZV3jW1NRoz5492rx5s55//vlge25urqRbq8XbXb16NWT1mJubO2qddGulGU+fqczfP6ATrR2jHp//aL4ysuL+GwYgyWJe87z55pvauXOnNm3apB/84Achx4bPS450HtLj8YSct3Q6nWpvb9fQ0FBYnfTl+cx4+wSARIspPOvq6rRjxw5t3LhRP/rRj8KOT506VXPmzFFDQ0NIKB4/flwdHR1atmxZsK20tFQ9PT06dOhQSB979+7VjBkz5HQ64+4TABIt6uvFPXv2qLa2Vt/61rf0zW9+U6dPnw4eGz9+vAoKCiRJFRUVWrdunX7+85/re9/7njo6OlRdXa158+bp6aefDj5myZIlWrhwoTZv3iyfzyeHw6G9e/fq5MmT2rFjR8j3jrVPAEi0qG+S//73v69//etfIx6bNm1ayAryyJEjqq2tDd5KuXTpUm3atCns/GRvb6+2b9+upqamkNszly5dGvY9Yu0zVom+YNTnj37OM/s+PufJBYzEYr7D3bU7jO41hGdq4cmcWMx3uLt2hxEAIBzhCQAGCE8AMEB4AoABwhMADBCeAGCA8AQAA4QnABggPAHAAOEJAAYITwAwQHgCgAHCEwAMEJ4AYIDwBAADhCcAGCA8AcAA4QkABghPADBAeAKAAcITAAwQngBggPAEAAOEJwAYIDwBwADhCQAG
CE8AMEB4AoABwhMADBCeAGCA8AQAAxnJHkA6GxiS/P0DEWuGAgkaDICEIjy/An//gE60dkSsmeeyJ2g0ABKJl+0AYIDwBAADhCcAGCA8AcAA4QkABghPADBAeAKAAcITAAwQngBggPAEAAOEJwAYIDwBwADhCQAG2FUpDUTb+i4rM0MZ/BkEEorwTAPRtr6b/2i+MrL4XwkkEusVADDAciXJLFaL+vzsRg+kG8Izyfz9gzrj6YxYw270QOrhZTsAGCA8AcAA4QkABghPADBAeAKAAcITAAzEFJ4XL17U1q1btXr1aj3++ONyu91qbm4esbahoUHPPfecHnvsMT311FOqrq6W3+8Pq+vq6tKrr76qhQsXqrCwUGvWrNGpU6e+Up8AkCgxhecXX3yhxsZGTZo0SUVFRaPW1dfXq6KiQl//+tf1u9/9Ti+99JL++Mc/6rXXXgup8/v9Ki8v14kTJ1RZWam6ujplZ2ervLxcLS0tRn0CQCLF9Cb5+fPn6/jx45KkgwcP6tChQ2E1g4ODeuONN1RcXKwtW7ZIkoqKipSZmanKykqVl5dr3rx5kqQ///nP8nq9+uCDDzR79mxJ0oIFC/TMM89o+/bt2r17d9x9AkAixbTytFqjl50+fVqdnZ0qKysLaV+xYoUyMzPV1NQUbDt48KBcLlcwOCVp/PjxWr58uY4dO6be3t64+wSARBqzC0Zer1eSNGvWrJD2iRMn6qGHHgoeH651uVxhfbjdbg0ODurcuXNx9wkAiTRm97b7fD5Jks1mCztms9mCx4drR6uTpO7u7rj7jNWUKZNjqrPbc6LWBK5cV87kCRFrMjMzItZEOx5LzaRJWbL/z6TIg01hscw1xg7zPTbGfGMQi8USU/todfHURupjNJcv92ooyjZFdnuOOjuvRe3run9A13r/E7Gmvz9yTbTjsdRcv+5X5+Bg5MGmqFjnGmOD+Q5ntVpiXlSFPG6sBpCbmytJI64Gr169GrJ6zM3NHbXu9r7i6RMAEmnMwtPpdEpS2HnIGzdu6Pz58yHnLZ1OpzweT1gfZ8+e1bhx4zRz5sy4+wSARBqz8CwsLJTdbld9fX1I+0cffaT+/n4tW7Ys2FZaWiqPx6PW1tZg282bN9XY2KhFixZp8uTJcfcJAIk0bsvwGyij2Ldvn9ra2nTmzBmdOnVKDodDV65c0YULFzR9+nRZrVbl5eVp165d6u7u1oQJE3TkyBFVVVWpuLhYa9euDfbldru1f/9+NTQ0yG6369KlS9q2bZvOnj2r6upqPfjgg5IUV5+xunHjpgJRdmbPzs7S9es3o/bVPzik/+vqi1gzdUq2Oi5fNz4eS800+2SNT9NPgIt1rjE2mO9wFotFkyaNj/txMV8w2rhxY8jXtbW1kqRp06YF3zRfVlYmq9Wq3bt36/3331deXp5WrVqlDRs2hDw2KytLb7/9tqqqqrRlyxb5/X4VFBRoz549mjNnTkhtrH0CQCJZAoFo67B7y1hebe/zR/5US+nLj9CI9DEb0Y7HUjP/0Xxlp+mnZ3L1N7GY73BJv9oOAPcTwhMADBCeAGCA8AQAA4QnABggPAHAAOEJAAYITwAwQHgCgIH0vC0FISxWi/r8AxFrsjIzlKa3vwMpifC8B/j7B6Pe4jn/0XxlpOktnEAqYi0CAAYITwAwQHgCgAHCEwAMEJ4AYIDwBAADhCcAGCA8AcAA4QkABghPADBAeAKAAcITAAwQngBggPAEAAOEJwAYIDwBwADhCQAGCE8AMEB4AoABwhMADBCeAGCA8AQAA4QnABggPAHAAOEJAAYykj0AJIbFalGffyBiTVZmhjL4cwrEhPC8T/j7B3XG0xmxZv6j+crI4lcCiAXrDAAwQHgCgAHCEwAMEJ4AYIDwBAADhCcAGCA8AcAAb+qLYGBI8veP/sbyoUACBwMgpRCeEfj7B3SitWPU4/Nc9gSOBkAq4WU7ABhg5YmgaPe/c+87cAvhiaBo979z7ztwC+sI
ADBAeAKAAcITAAwQngBggLP/iBm70QO3EJ6IGbvRA7ewRgAAA2kRnn19fdq6dauefPJJzZ07V88//7z++te/JntYAO5jaRGe69evV0NDgzZu3Ki33npLTqdT69ev1+HDh5M9NNxh+LzoaP8GhpI9QmBspPzJqcOHD+vYsWOqq6tTaWmpJKmoqEjnz5/Xtm3btGTJkiSPELeLdl50weyp8vff2o4qcOW6rt9xEYqLTkgHKR+eBw4cUE5OjkpKSoJtFotFZWVlqqysVFtbm5xOZxJHiHjcGa45kyfoWu9/QmruDNg7Ea5IBSkfnl6vV06nU1Zr6LPF7XZLkjweT1zhabVaYq7LGGfVpAmZo9ZEOx5LTar0kayxTszK0OBAaP3gUECtn18ZtY95LrsGByJvppqRMU4DA4MRa8ZnjNO4uxzCg0PSzRQYx+1ifQ7cL0znI+XD0+fzafr06WHtNpsteDweeXnZMdVNmTJZkuT4X1vEupmOvKh9RatJlT4S9X3GaqwwM/y7ja8mLV78WCyj/2WIdAwA7paUD8/c3NwRV5dXr16VdGsFCgCJlPLh6XQ61d7erqGh0Pe4eDweSZLL5UrGsADc51I+PEtLS9XT06NDhw6FtO/du1czZszgSjuApEj5C0ZLlizRwoULtXnzZvl8PjkcDu3du1cnT57Ujh07kj08APcpSyAQSPkP0O3t7dX27dvV1NSknp4eOZ1Ovfzyy1q6dGmyhwbgPpUW4QkAqSblz3kCQCoiPAHAAOH5X2x7N/aam5vldrtH/Nfe3h5Se/ToUX33u9/V3LlztWjRIr3++uvq6elJ0shT38WLF7V161atXr1ajz/+uNxut5qbm0esbWho0HPPPafHHntMTz31lKqrq+X3+8Pqurq69Oqrr2rhwoUqLCzUmjVrdOrUqbv9o6StlL/anijr169XS0uLKioq5HA49OGHH2r9+vXauXMnOzd9RRUVFZo/f35Im8PhCP53c3OzXnzxRZWUlOhnP/uZLl26pOrqank8Hr377rth+xpA+uKLL9TY2KiCggIVFRWFvZVvWH19vX7xi19o9erV+tWvfqX29nZVV1frwoULqqmpCdb5/X6Vl5fr+vXrqqysVG5urt5++22Vl5frvffeU0FBQaJ+tPQRQODvf/97wOVyBfbv3x9sGxoaCqxatSrw9NNPJ3Fk6e3jjz8OuFyuwIEDByLWffvb3w6sXLkyMDg4GGz75z//GXC5XIHGxsa7Pcy0dPtcHThwIOByuQIff/xxSM3AwEBg8eLFgR//+Mch7X/6058CLpcrcPr06WDbO++8E3C5XIFPP/002Ob3+wPFxcWBdevW3aWfIr3xJ12Rt707d+6c2trakji6e1tHR4c++eQTrVy5MmSFuXjxYuXn56upqSmJo0tdsazGT58+rc7OTpWVlYW0r1ixQpmZmSFze/DgQblcLs2ePTvYNn78eC1fvlzHjh1Tb2/v2A3+HkF4KrZt72Du9ddfV0FBgb7xjW/opZde0qeffho8Njy3s2bNCnucy+WS1+tN2DjvNcNzd+fcTpw4UQ899FDI3Hq93hFvdXa73RocHNS5c+fu7mDTEOc8Nfbb3uFLOTk5+uEPf6gFCxYoNzdX7e3t2rVrl1avXq133nlH8+bNC87tSBu82Gw2tbS0JHrY94xoc3v777XP5xu1TpK6u7vv0ijTF+H5X2x7N/YKCgpCLjQ88cQTKi4u1vLly1VTU6Pf//73wWOjzTFz/9XFOrc8B+LDy3ax7V0i2e12Pfnkkzpz5oykL+deGnl1f/XqVeb+K4hnbqM9B4b7wi2Ep9j2LtFun+fh83Ejndv0eDwjngtFbIZ3HLtzbm/cuKHz58+HzK3T6Rzx3P7Zs2c1btw4zZw58+4ONg0RnmLbu0Tq7OzUsWPHVFhYKEmaOnWq5syZo4aGhpBQPX78uDo6OrRs2bJkDTXtFRYWym63q76+PqT9o48+Un9/f8jclpaWyuPxqLW1
Ndh28+ZNNTY2atGiRZo8mY/uuNO4LVu2bEn2IJLt4Ycf1okTJ/T+++8rLy9PPT09qqur09/+9jf95je/0YwZM5I9xLT0yiuvqLW1VdeuXVNXV5f+8Y9/6Je//KWuXbumN954Q/n5+ZKkr33ta9qzZ4/a2tpks9l08uRJ/frXv9asWbP02muv8Sb5Uezbt09tbW06c+aMTp06JYfDoStXrujChQuaPn26rFar8vLytGvXLnV3d2vChAk6cuSIqqqqVFxcrLVr1wb7crvd2r9/vxoaGmS323Xp0iVt27ZNZ8+eVXV1tR588MEk/qSpiV2V/ott78berl271NjYqAsXLujGjRvKzc3VggUL9JOf/CTsVMiRI0dUW1urzz77TNnZ2Vq6dKk2bdrEOc8Iht9Kd6dp06aFvIqqr6/X7t279fnnnysvL08rVqzQhg0bNGHChJDHdXZ2qqqqSocPH5bf71dBQYFeeeUVPfHEE3f150hXhCcAGOD1EAAYIDwBwADhCQAGCE8AMEB4AoABwhMADBCeAGCA8AQAA4QnABj4fwWz0YSiOR5XAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 360x216 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "task_name = 'tox21'\n",
    "tasks = [\n",
    "  'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',\n",
    "  'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'\n",
    "]\n",
    "raw_filename = \"../data/tox21.csv\"\n",
    "feature_filename = raw_filename.replace('.csv','.pickle')\n",
    "filename = raw_filename.replace('.csv','')\n",
    "prefix_filename = raw_filename.split('/')[-1].replace('.csv','')\n",
    "smiles_tasks_df = pd.read_csv(raw_filename)\n",
    "smilesList = smiles_tasks_df.smiles.values\n",
    "print(\"number of all smiles: \",len(smilesList))\n",
    "atom_num_dist = []\n",
    "remained_smiles = []\n",
    "canonical_smiles_list = []\n",
    "for smiles in smilesList:\n",
    "    try:\n",
    "        mol = Chem.MolFromSmiles(smiles)\n",
    "        # Canonicalize first (reusing the already-parsed mol instead of re-parsing)\n",
    "        # so nothing is appended unless the SMILES fully processes; this keeps\n",
    "        # atom_num_dist / remained_smiles / canonical_smiles_list aligned.\n",
    "        cano_smiles = Chem.MolToSmiles(mol, isomericSmiles=True)\n",
    "        atom_num_dist.append(len(mol.GetAtoms()))\n",
    "        remained_smiles.append(smiles)\n",
    "        canonical_smiles_list.append(cano_smiles)\n",
    "    except Exception:  # MolFromSmiles returns None for bad SMILES -> AttributeError above\n",
    "        print(\"not successfully processed smiles: \", smiles)\n",
    "print(\"number of successfully processed smiles: \", len(remained_smiles))\n",
    "# .copy() avoids assigning a new column onto a view (SettingWithCopyWarning)\n",
    "smiles_tasks_df = smiles_tasks_df[smiles_tasks_df[\"smiles\"].isin(remained_smiles)].copy()\n",
    "# print(smiles_tasks_df)\n",
    "smiles_tasks_df['cano_smiles'] = canonical_smiles_list\n",
    "\n",
    "plt.figure(figsize=(5, 3))\n",
    "sns.set(font_scale=1.5)\n",
    "ax = sns.distplot(atom_num_dist, bins=28, kde=False)\n",
    "plt.tight_layout()\n",
    "# plt.savefig(\"atom_num_dist_\"+prefix_filename+\".png\",dpi=200)\n",
    "plt.show()\n",
    "plt.close()\n",
    "\n",
    "# print(len([i for i in atom_num_dist if i<51]),len([i for i in atom_num_dist if i>50]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "random_seed = 888\n",
    "start_time = time.ctime().replace(':','-').replace(' ','_')  # ctime() already returns str; sanitized for use in file names\n",
    "start = time.time()\n",
    "\n",
    "batch_size = 100\n",
    "epochs = 800\n",
    "p_dropout = 0.5\n",
    "fingerprint_dim = 200\n",
    "\n",
    "radius = 3\n",
    "T = 3\n",
    "weight_decay = 3 # also known as l2_regularization_lambda\n",
    "learning_rate = 3.5 # NOTE(review): 3.5 is not a literal lr; AttentiveFP-style code typically passes 10**-learning_rate to the optimizer -- confirm downstream usage\n",
    "per_task_output_units_num = 2 # for classification model with 2 classes\n",
    "output_units_num = len(tasks) * per_task_output_units_num"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>NR-AR</th>\n",
       "      <th>NR-AR-LBD</th>\n",
       "      <th>NR-AhR</th>\n",
       "      <th>NR-Aromatase</th>\n",
       "      <th>NR-ER</th>\n",
       "      <th>NR-ER-LBD</th>\n",
       "      <th>NR-PPAR-gamma</th>\n",
       "      <th>SR-ARE</th>\n",
       "      <th>SR-ATAD5</th>\n",
       "      <th>SR-HSE</th>\n",
       "      <th>SR-MMP</th>\n",
       "      <th>SR-p53</th>\n",
       "      <th>mol_id</th>\n",
       "      <th>smiles</th>\n",
       "      <th>cano_smiles</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>95</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX14836</td>\n",
       "      <td>[I-].[K+]</td>\n",
       "      <td>[I-].[K+]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>255</th>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1.0</td>\n",
       "      <td>TOX811</td>\n",
       "      <td>[Hg+2]</td>\n",
       "      <td>[Hg+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>659</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX131</td>\n",
       "      <td>[Ba+2]</td>\n",
       "      <td>[Ba+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>985</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>TOX4331</td>\n",
       "      <td>[TlH2+]</td>\n",
       "      <td>[TlH2+]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1423</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX327</td>\n",
       "      <td>[Cr+3]</td>\n",
       "      <td>[Cr+3]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1534</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX9688</td>\n",
       "      <td>[Fe+2]</td>\n",
       "      <td>[Fe+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1722</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX340</td>\n",
       "      <td>[Co+2]</td>\n",
       "      <td>[Co+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1933</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX11521</td>\n",
       "      <td>[PbH2+2]</td>\n",
       "      <td>[PbH2+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2147</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX622</td>\n",
       "      <td>[Fe+3]</td>\n",
       "      <td>[Fe+3]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2251</th>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX20449</td>\n",
       "      <td>[Cu+2]</td>\n",
       "      <td>[Cu+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2760</th>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>TOX226</td>\n",
       "      <td>[Cd+2]</td>\n",
       "      <td>[Cd+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2832</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX1351</td>\n",
       "      <td>[SnH2+2]</td>\n",
       "      <td>[SnH2+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4024</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX7279</td>\n",
       "      <td>[Mn+2]</td>\n",
       "      <td>[Mn+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4375</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX4603</td>\n",
       "      <td>[Be+2]</td>\n",
       "      <td>[Be+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4611</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX1461</td>\n",
       "      <td>[Zn+2]</td>\n",
       "      <td>[Zn+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5942</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX14903</td>\n",
       "      <td>[Br-].[Na+]</td>\n",
       "      <td>[Br-].[Na+]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6477</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX235</td>\n",
       "      <td>[Ca+2].[Cl-].[Cl-]</td>\n",
       "      <td>[Ca+2].[Cl-].[Cl-]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6547</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX9283</td>\n",
       "      <td>[SbH6+3]</td>\n",
       "      <td>[SbH6+3]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6717</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX928</td>\n",
       "      <td>[Ni+2]</td>\n",
       "      <td>[Ni+2]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7407</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>TOX21126</td>\n",
       "      <td>N#C[Fe-2](C#N)(C#N)(C#N)(C#N)N=O</td>\n",
       "      <td>N#C[Fe-2](C#N)(C#N)(C#N)(C#N)N=O</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      NR-AR  NR-AR-LBD  NR-AhR  NR-Aromatase  NR-ER  NR-ER-LBD  NR-PPAR-gamma  \\\n",
       "95      0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "255     0.0        1.0     1.0           NaN    NaN        1.0            1.0   \n",
       "659     0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "985     0.0        0.0     0.0           1.0    0.0        0.0            NaN   \n",
       "1423    0.0        0.0     0.0           NaN    0.0        0.0            0.0   \n",
       "1534    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "1722    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "1933    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "2147    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "2251    0.0        NaN     0.0           0.0    NaN        0.0            0.0   \n",
       "2760    0.0        NaN     1.0           NaN    0.0        NaN            NaN   \n",
       "2832    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "4024    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "4375    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "4611    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "5942    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "6477    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "6547    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "6717    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "7407    0.0        0.0     0.0           0.0    0.0        0.0            0.0   \n",
       "\n",
       "      SR-ARE  SR-ATAD5  SR-HSE  SR-MMP  SR-p53    mol_id  \\\n",
       "95       0.0       0.0     0.0     0.0     0.0  TOX14836   \n",
       "255      NaN       NaN     1.0     NaN     1.0    TOX811   \n",
       "659      0.0       0.0     0.0     1.0     0.0    TOX131   \n",
       "985      0.0       0.0     0.0     0.0     NaN   TOX4331   \n",
       "1423     0.0       0.0     0.0     0.0     0.0    TOX327   \n",
       "1534     NaN       0.0     0.0     0.0     0.0   TOX9688   \n",
       "1722     1.0       0.0     0.0     0.0     0.0    TOX340   \n",
       "1933     1.0       0.0     0.0     0.0     0.0  TOX11521   \n",
       "2147     0.0       0.0     0.0     0.0     0.0    TOX622   \n",
       "2251     NaN       0.0     1.0     NaN     0.0  TOX20449   \n",
       "2760     NaN       1.0     1.0     0.0     NaN    TOX226   \n",
       "2832     0.0       0.0     0.0     0.0     0.0   TOX1351   \n",
       "4024     1.0       NaN     0.0     0.0     0.0   TOX7279   \n",
       "4375     0.0       0.0     0.0     NaN     0.0   TOX4603   \n",
       "4611     0.0       0.0     0.0     0.0     0.0   TOX1461   \n",
       "5942     0.0       0.0     0.0     0.0     0.0  TOX14903   \n",
       "6477     0.0       0.0     0.0     0.0     0.0    TOX235   \n",
       "6547     0.0       0.0     0.0     NaN     0.0   TOX9283   \n",
       "6717     0.0       0.0     0.0     NaN     0.0    TOX928   \n",
       "7407     1.0       0.0     0.0     0.0     0.0  TOX21126   \n",
       "\n",
       "                                smiles                       cano_smiles  \n",
       "95                           [I-].[K+]                         [I-].[K+]  \n",
       "255                             [Hg+2]                            [Hg+2]  \n",
       "659                             [Ba+2]                            [Ba+2]  \n",
       "985                            [TlH2+]                           [TlH2+]  \n",
       "1423                            [Cr+3]                            [Cr+3]  \n",
       "1534                            [Fe+2]                            [Fe+2]  \n",
       "1722                            [Co+2]                            [Co+2]  \n",
       "1933                          [PbH2+2]                          [PbH2+2]  \n",
       "2147                            [Fe+3]                            [Fe+3]  \n",
       "2251                            [Cu+2]                            [Cu+2]  \n",
       "2760                            [Cd+2]                            [Cd+2]  \n",
       "2832                          [SnH2+2]                          [SnH2+2]  \n",
       "4024                            [Mn+2]                            [Mn+2]  \n",
       "4375                            [Be+2]                            [Be+2]  \n",
       "4611                            [Zn+2]                            [Zn+2]  \n",
       "5942                       [Br-].[Na+]                       [Br-].[Na+]  \n",
       "6477                [Ca+2].[Cl-].[Cl-]                [Ca+2].[Cl-].[Cl-]  \n",
       "6547                          [SbH6+3]                          [SbH6+3]  \n",
       "6717                            [Ni+2]                            [Ni+2]  \n",
       "7407  N#C[Fe-2](C#N)(C#N)(C#N)(C#N)N=O  N#C[Fe-2](C#N)(C#N)(C#N)(C#N)N=O  "
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "if os.path.isfile(feature_filename):\n",
    "    # context manager closes the handle (pickle.load(open(...)) leaked an open file)\n",
    "    with open(feature_filename, \"rb\") as f:\n",
    "        feature_dicts = pickle.load(f)\n",
    "else:\n",
    "    feature_dicts = save_smiles_dicts(smilesList,filename)\n",
    "# feature_dicts = get_smiles_dicts(smilesList)\n",
    "\n",
    "remained_df = smiles_tasks_df[smiles_tasks_df[\"cano_smiles\"].isin(feature_dicts['smiles_to_atom_mask'].keys())]\n",
    "uncovered_df = smiles_tasks_df.drop(remained_df.index)\n",
    "uncovered_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "weights = []\n",
    "for i,task in enumerate(tasks):\n",
    "    negative_df = remained_df[remained_df[task] == 0][[\"smiles\",task]]\n",
    "    positive_df = remained_df[remained_df[task] == 1][[\"smiles\",task]]\n",
    "    n_pos, n_neg = positive_df.shape[0], negative_df.shape[0]\n",
    "    # inverse-frequency class weights [w_negative, w_positive]; assumes every task\n",
    "    # has at least one positive and one negative label (otherwise divides by zero)\n",
    "    weights.append([(n_pos + n_neg) / n_neg, (n_pos + n_neg) / n_pos])\n",
    "\n",
    "test_df = remained_df.sample(frac=1/10, random_state=random_seed) # test set\n",
    "training_data = remained_df.drop(test_df.index) # training data\n",
    "\n",
    "# training data is further divided into validation set and train set\n",
    "valid_df = training_data.sample(frac=1/9, random_state=random_seed) # validation set\n",
    "train_df = training_data.drop(valid_df.index) # train set\n",
    "train_df = train_df.reset_index(drop=True)\n",
    "valid_df = valid_df.reset_index(drop=True)\n",
    "test_df = test_df.reset_index(drop=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1150028\n",
      "atom_fc.weight torch.Size([200, 39])\n",
      "atom_fc.bias torch.Size([200])\n",
      "neighbor_fc.weight torch.Size([200, 49])\n",
      "neighbor_fc.bias torch.Size([200])\n",
      "GRUCell.0.weight_ih torch.Size([600, 200])\n",
      "GRUCell.0.weight_hh torch.Size([600, 200])\n",
      "GRUCell.0.bias_ih torch.Size([600])\n",
      "GRUCell.0.bias_hh torch.Size([600])\n",
      "GRUCell.1.weight_ih torch.Size([600, 200])\n",
      "GRUCell.1.weight_hh torch.Size([600, 200])\n",
      "GRUCell.1.bias_ih torch.Size([600])\n",
      "GRUCell.1.bias_hh torch.Size([600])\n",
      "GRUCell.2.weight_ih torch.Size([600, 200])\n",
      "GRUCell.2.weight_hh torch.Size([600, 200])\n",
      "GRUCell.2.bias_ih torch.Size([600])\n",
      "GRUCell.2.bias_hh torch.Size([600])\n",
      "align.0.weight torch.Size([1, 400])\n",
      "align.0.bias torch.Size([1])\n",
      "align.1.weight torch.Size([1, 400])\n",
      "align.1.bias torch.Size([1])\n",
      "align.2.weight torch.Size([1, 400])\n",
      "align.2.bias torch.Size([1])\n",
      "attend.0.weight torch.Size([200, 200])\n",
      "attend.0.bias torch.Size([200])\n",
      "attend.1.weight torch.Size([200, 200])\n",
      "attend.1.bias torch.Size([200])\n",
      "attend.2.weight torch.Size([200, 200])\n",
      "attend.2.bias torch.Size([200])\n",
      "mol_GRUCell.weight_ih torch.Size([600, 200])\n",
      "mol_GRUCell.weight_hh torch.Size([600, 200])\n",
      "mol_GRUCell.bias_ih torch.Size([600])\n",
      "mol_GRUCell.bias_hh torch.Size([600])\n",
      "mol_align.weight torch.Size([1, 400])\n",
      "mol_align.bias torch.Size([1])\n",
      "mol_attend.weight torch.Size([200, 200])\n",
      "mol_attend.bias torch.Size([200])\n",
      "output.weight torch.Size([24, 200])\n",
      "output.bias torch.Size([24])\n"
     ]
    }
   ],
   "source": [
    "# Featurize a single molecule just to read off the input feature sizes;\n",
    "# the model's first layers are dimensioned from these shapes.\n",
    "x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array([smilesList[0]],feature_dicts)\n",
    "num_atom_features = x_atom.shape[-1]\n",
    "num_bond_features = x_bonds.shape[-1]\n",
    "\n",
    "# One weighted CrossEntropyLoss per task, using the inverse-frequency\n",
    "# class weights computed in the split cell above.\n",
    "loss_function = [nn.CrossEntropyLoss(torch.Tensor(weight),reduction='mean') for weight in weights]\n",
    "model = Fingerprint(radius, T, num_atom_features,num_bond_features,\n",
    "            fingerprint_dim, output_units_num, p_dropout)\n",
    "model.cuda()\n",
    "# tensorboard = SummaryWriter(log_dir=\"runs/\"+start_time+\"_\"+prefix_filename+\"_\"+str(fingerprint_dim)+\"_\"+str(p_dropout))\n",
    "\n",
    "# optimizer = optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)\n",
    "# NOTE(review): learning_rate and weight_decay are used as exponents here\n",
    "# (actual values are 10**-learning_rate and 10**-weight_decay) -- they are\n",
    "# presumably set in an earlier hyperparameter cell; confirm when tuning.\n",
    "optimizer = optim.Adam(model.parameters(), 10**-learning_rate, weight_decay=10**-weight_decay)\n",
    "# Count trainable parameters (the filter generator is consumed by sum).\n",
    "model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n",
    "params = sum([np.prod(p.size()) for p in model_parameters])\n",
    "print(params)\n",
    "for name, param in model.named_parameters():\n",
    "    if param.requires_grad:\n",
    "        print(name, param.data.shape)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, dataset, optimizer, loss_function):\n",
    "    \"\"\"Run one epoch of shuffled mini-batch training over `dataset`.\n",
    "\n",
    "    Relies on notebook globals: epoch, batch_size, tasks,\n",
    "    per_task_output_units_num, feature_dicts, get_smiles_array.\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "    # Reseed with the global epoch counter: the shuffle order is\n",
    "    # reproducible yet still differs from epoch to epoch.\n",
    "    np.random.seed(epoch)\n",
    "    valList = np.arange(0,dataset.shape[0])\n",
    "    np.random.shuffle(valList)\n",
    "    batch_list = [valList[i:i+batch_size] for i in range(0, dataset.shape[0], batch_size)]\n",
    "    for counter, train_batch in enumerate(batch_list):\n",
    "        batch_df = dataset.loc[train_batch,:]\n",
    "        smiles_list = batch_df.cano_smiles.values\n",
    "\n",
    "        x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,feature_dicts)\n",
    "        atoms_prediction, mol_prediction = model(torch.Tensor(x_atom),torch.Tensor(x_bonds),torch.cuda.LongTensor(x_atom_index),torch.cuda.LongTensor(x_bond_index),torch.Tensor(x_mask))\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss = 0.0\n",
    "        n_contributing_tasks = 0\n",
    "        for i,task in enumerate(tasks):\n",
    "            # Slice this task's logits out of the shared multi-task head.\n",
    "            y_pred = mol_prediction[:, i * per_task_output_units_num:(i + 1) *\n",
    "                                    per_task_output_units_num]\n",
    "            y_val = batch_df[task].values\n",
    "\n",
    "            # Only rows carrying an explicit 0/1 label contribute to this\n",
    "            # task; missing labels (e.g. NaN) are skipped.\n",
    "            validInds = np.where((y_val==0) | (y_val==1))[0]\n",
    "            if len(validInds) == 0:\n",
    "                continue\n",
    "            y_val_adjust = np.array([y_val[v] for v in validInds]).astype(float)\n",
    "            validInds = torch.cuda.LongTensor(validInds).squeeze()\n",
    "            y_pred_adjust = torch.index_select(y_pred, 0, validInds)\n",
    "\n",
    "            loss += loss_function[i](\n",
    "                y_pred_adjust,\n",
    "                torch.cuda.LongTensor(y_val_adjust))\n",
    "            n_contributing_tasks += 1\n",
    "        # Fix: if no task had a single valid label in this batch, `loss`\n",
    "        # is still the plain float 0.0 and has no .backward(); skip the\n",
    "        # optimizer step instead of crashing.\n",
    "        if n_contributing_tasks == 0:\n",
    "            continue\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "def eval(model, dataset):\n",
    "    \"\"\"Evaluate `model` on `dataset`.\n",
    "\n",
    "    Returns (per-task ROC-AUC list, mean of per-task batch losses).\n",
    "    Shadows the built-in eval(); name kept for compatibility with callers.\n",
    "    Relies on notebook globals: batch_size, tasks,\n",
    "    per_task_output_units_num, feature_dicts, loss_function.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    y_val_list = {}\n",
    "    y_pred_list = {}\n",
    "    losses_list = []\n",
    "    valList = np.arange(0,dataset.shape[0])\n",
    "    # Sequential, unshuffled batches -- order is irrelevant for metrics.\n",
    "    batch_list = []\n",
    "    for i in range(0, dataset.shape[0], batch_size):\n",
    "        batch = valList[i:i+batch_size]\n",
    "        batch_list.append(batch)\n",
    "    with torch.no_grad():  # inference only; avoids building autograd graphs\n",
    "        for counter, eval_batch in enumerate(batch_list):\n",
    "            batch_df = dataset.loc[eval_batch,:]\n",
    "            smiles_list = batch_df.cano_smiles.values\n",
    "\n",
    "            x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,feature_dicts)\n",
    "            atoms_prediction, mol_prediction = model(torch.Tensor(x_atom),torch.Tensor(x_bonds),torch.cuda.LongTensor(x_atom_index),torch.cuda.LongTensor(x_bond_index),torch.Tensor(x_mask))\n",
    "            atom_pred = atoms_prediction.data[:,:,1].unsqueeze(2).cpu().numpy()\n",
    "            for i,task in enumerate(tasks):\n",
    "                y_pred = mol_prediction[:, i * per_task_output_units_num:(i + 1) *\n",
    "                                        per_task_output_units_num]\n",
    "                y_val = batch_df[task].values\n",
    "\n",
    "                # Keep only rows that carry a real 0/1 label for this task.\n",
    "                validInds = np.where((y_val==0) | (y_val==1))[0]\n",
    "                if len(validInds) == 0:\n",
    "                    continue\n",
    "                y_val_adjust = np.array([y_val[v] for v in validInds]).astype(float)\n",
    "                validInds = torch.cuda.LongTensor(validInds).squeeze()\n",
    "                y_pred_adjust = torch.index_select(y_pred, 0, validInds)\n",
    "                loss = loss_function[i](\n",
    "                    y_pred_adjust,\n",
    "                    torch.cuda.LongTensor(y_val_adjust))\n",
    "                # Positive-class probability for ROC-AUC.\n",
    "                y_pred_adjust = F.softmax(y_pred_adjust,dim=-1).data.cpu().numpy()[:,1]\n",
    "                losses_list.append(loss.cpu().detach().numpy())\n",
    "                # Fix: was try/extend with a bare `except:` that silently\n",
    "                # swallowed every error; initialize accumulators explicitly.\n",
    "                if i not in y_val_list:\n",
    "                    y_val_list[i] = []\n",
    "                    y_pred_list[i] = []\n",
    "                y_val_list[i].extend(y_val_adjust)\n",
    "                y_pred_list[i].extend(y_pred_adjust)\n",
    "\n",
    "    # NOTE(review): this raises KeyError if any task never had a valid\n",
    "    # 0/1 label anywhere in `dataset`.\n",
    "    eval_roc = [roc_auc_score(y_val_list[i], y_pred_list[i]) for i in range(len(tasks))]\n",
    "    eval_loss = np.array(losses_list).mean()\n",
    "\n",
    "    return eval_roc, eval_loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "EPOCH:\t0\n",
      "train_roc_mean:0.44824940485806714\n",
      "valid_roc_mean:0.45839218196250414\n",
      "\n",
      "EPOCH:\t1\n",
      "train_roc_mean:0.6912163844134013\n",
      "valid_roc_mean:0.6917160893748351\n",
      "\n",
      "EPOCH:\t2\n",
      "train_roc_mean:0.7284853506625054\n",
      "valid_roc_mean:0.7221997811497229\n",
      "\n",
      "EPOCH:\t3\n",
      "train_roc_mean:0.7410893656275936\n",
      "valid_roc_mean:0.7319440773021492\n",
      "\n",
      "EPOCH:\t4\n",
      "train_roc_mean:0.7491323253679213\n",
      "valid_roc_mean:0.7355131982318337\n",
      "\n",
      "EPOCH:\t5\n",
      "train_roc_mean:0.7571786085972886\n",
      "valid_roc_mean:0.7413717400800796\n",
      "\n",
      "EPOCH:\t6\n",
      "train_roc_mean:0.7654615795963985\n",
      "valid_roc_mean:0.7489100277525716\n",
      "\n",
      "EPOCH:\t7\n",
      "train_roc_mean:0.7794215946182631\n",
      "valid_roc_mean:0.7552061883914126\n",
      "\n",
      "EPOCH:\t8\n",
      "train_roc_mean:0.7890600751290481\n",
      "valid_roc_mean:0.7608711389018179\n",
      "\n",
      "EPOCH:\t9\n",
      "train_roc_mean:0.7995430291595956\n",
      "valid_roc_mean:0.7711636298620984\n",
      "\n",
      "EPOCH:\t10\n",
      "train_roc_mean:0.8113532357374975\n",
      "valid_roc_mean:0.7824094409120989\n",
      "\n",
      "EPOCH:\t11\n",
      "train_roc_mean:0.8180314524094889\n",
      "valid_roc_mean:0.7887667911766235\n",
      "\n",
      "EPOCH:\t12\n",
      "train_roc_mean:0.8248393290090571\n",
      "valid_roc_mean:0.799890751434213\n",
      "\n",
      "EPOCH:\t13\n",
      "train_roc_mean:0.8309402301732066\n",
      "valid_roc_mean:0.8063726615014558\n",
      "\n",
      "EPOCH:\t14\n",
      "train_roc_mean:0.8372065319965718\n",
      "valid_roc_mean:0.8083585428886345\n",
      "\n",
      "EPOCH:\t15\n",
      "train_roc_mean:0.8416280397669932\n",
      "valid_roc_mean:0.8162985914700919\n",
      "\n",
      "EPOCH:\t16\n",
      "train_roc_mean:0.8437546836298925\n",
      "valid_roc_mean:0.8255348228202347\n",
      "\n",
      "EPOCH:\t17\n",
      "train_roc_mean:0.8475209953453225\n",
      "valid_roc_mean:0.8248691334669128\n",
      "\n",
      "EPOCH:\t18\n",
      "train_roc_mean:0.8528378224664436\n",
      "valid_roc_mean:0.8296902183129623\n",
      "\n",
      "EPOCH:\t19\n",
      "train_roc_mean:0.8539969504977153\n",
      "valid_roc_mean:0.831893239868167\n",
      "\n",
      "EPOCH:\t20\n",
      "train_roc_mean:0.8547484963766722\n",
      "valid_roc_mean:0.8319672426983326\n",
      "\n",
      "EPOCH:\t21\n",
      "train_roc_mean:0.8600386412470701\n",
      "valid_roc_mean:0.8388894070801202\n",
      "\n",
      "EPOCH:\t22\n",
      "train_roc_mean:0.8628317399766138\n",
      "valid_roc_mean:0.8358483774320605\n",
      "\n",
      "EPOCH:\t23\n",
      "train_roc_mean:0.8658774025575292\n",
      "valid_roc_mean:0.8404495142110724\n",
      "\n",
      "EPOCH:\t24\n",
      "train_roc_mean:0.8686140915620286\n",
      "valid_roc_mean:0.8409025721146962\n",
      "\n",
      "EPOCH:\t25\n",
      "train_roc_mean:0.8703006931949194\n",
      "valid_roc_mean:0.8427384259621277\n",
      "\n",
      "EPOCH:\t26\n",
      "train_roc_mean:0.8739098202769023\n",
      "valid_roc_mean:0.8451277084892094\n",
      "\n",
      "EPOCH:\t27\n",
      "train_roc_mean:0.8727796475085045\n",
      "valid_roc_mean:0.8440519502656484\n",
      "\n",
      "EPOCH:\t28\n",
      "train_roc_mean:0.8765032531679747\n",
      "valid_roc_mean:0.8421473367993069\n",
      "\n",
      "EPOCH:\t29\n",
      "train_roc_mean:0.8798156921810153\n",
      "valid_roc_mean:0.8489892107353523\n",
      "\n",
      "EPOCH:\t30\n",
      "train_roc_mean:0.8788285709295062\n",
      "valid_roc_mean:0.8463907365564444\n",
      "\n",
      "EPOCH:\t31\n",
      "train_roc_mean:0.8821241960928684\n",
      "valid_roc_mean:0.8408598525590741\n",
      "\n",
      "EPOCH:\t32\n",
      "train_roc_mean:0.8852981645113065\n",
      "valid_roc_mean:0.8473934154209594\n",
      "\n",
      "EPOCH:\t33\n",
      "train_roc_mean:0.8884717191722089\n",
      "valid_roc_mean:0.8444147505835016\n",
      "\n",
      "EPOCH:\t34\n",
      "train_roc_mean:0.8901893031956484\n",
      "valid_roc_mean:0.8492728135166315\n",
      "\n",
      "EPOCH:\t35\n",
      "train_roc_mean:0.8896978995626833\n",
      "valid_roc_mean:0.8465027864013203\n",
      "\n",
      "EPOCH:\t36\n",
      "train_roc_mean:0.8927308173563634\n",
      "valid_roc_mean:0.8515376869695599\n",
      "\n",
      "EPOCH:\t37\n",
      "train_roc_mean:0.8922752071061716\n",
      "valid_roc_mean:0.8482759741588465\n",
      "\n",
      "EPOCH:\t38\n",
      "train_roc_mean:0.8959140747555129\n",
      "valid_roc_mean:0.8511624919736277\n",
      "\n",
      "EPOCH:\t39\n",
      "train_roc_mean:0.8969768343002711\n",
      "valid_roc_mean:0.8558762058102678\n",
      "\n",
      "EPOCH:\t40\n",
      "train_roc_mean:0.8980910016548483\n",
      "valid_roc_mean:0.8499505735764895\n",
      "\n",
      "EPOCH:\t41\n",
      "train_roc_mean:0.901064461894674\n",
      "valid_roc_mean:0.8530136662593221\n",
      "\n",
      "EPOCH:\t42\n",
      "train_roc_mean:0.9015821884454018\n",
      "valid_roc_mean:0.8481124957388619\n",
      "\n",
      "EPOCH:\t43\n",
      "train_roc_mean:0.9034321208154551\n",
      "valid_roc_mean:0.8491688763508464\n",
      "\n",
      "EPOCH:\t44\n",
      "train_roc_mean:0.9054488890693704\n",
      "valid_roc_mean:0.8507572877419509\n",
      "\n",
      "EPOCH:\t45\n",
      "train_roc_mean:0.9070088250892793\n",
      "valid_roc_mean:0.8526212132147551\n",
      "\n",
      "EPOCH:\t46\n",
      "train_roc_mean:0.9099722765254504\n",
      "valid_roc_mean:0.8544067960630128\n",
      "\n",
      "EPOCH:\t47\n",
      "train_roc_mean:0.9081717603893663\n",
      "valid_roc_mean:0.8546219703590667\n",
      "\n",
      "EPOCH:\t48\n",
      "train_roc_mean:0.9096599485505411\n",
      "valid_roc_mean:0.8543118079756485\n",
      "\n",
      "EPOCH:\t49\n",
      "train_roc_mean:0.9123057860281932\n",
      "valid_roc_mean:0.8579580489439729\n",
      "\n",
      "EPOCH:\t50\n",
      "train_roc_mean:0.9145890940421442\n",
      "valid_roc_mean:0.858344173506464\n",
      "\n",
      "EPOCH:\t51\n",
      "train_roc_mean:0.9151823444807805\n",
      "valid_roc_mean:0.8554716127910735\n",
      "\n",
      "EPOCH:\t52\n",
      "train_roc_mean:0.9165280634108108\n",
      "valid_roc_mean:0.8544041073757693\n",
      "\n",
      "EPOCH:\t53\n",
      "train_roc_mean:0.9166776088993879\n",
      "valid_roc_mean:0.8579464184704958\n",
      "\n",
      "EPOCH:\t54\n",
      "train_roc_mean:0.9202356639285858\n",
      "valid_roc_mean:0.8538290548065542\n",
      "\n",
      "EPOCH:\t55\n",
      "train_roc_mean:0.9221567578997713\n",
      "valid_roc_mean:0.8578957743994057\n",
      "\n",
      "EPOCH:\t56\n",
      "train_roc_mean:0.9244352611523143\n",
      "valid_roc_mean:0.8550299854659283\n",
      "\n",
      "EPOCH:\t57\n",
      "train_roc_mean:0.9249228300103188\n",
      "valid_roc_mean:0.8556339559357506\n",
      "\n",
      "EPOCH:\t58\n",
      "train_roc_mean:0.9262071878916313\n",
      "valid_roc_mean:0.8586876929181476\n",
      "\n",
      "EPOCH:\t59\n",
      "train_roc_mean:0.926388346202342\n",
      "valid_roc_mean:0.8577898480812198\n",
      "\n",
      "EPOCH:\t60\n",
      "train_roc_mean:0.9290113006150053\n",
      "valid_roc_mean:0.8554448352177763\n",
      "\n",
      "EPOCH:\t61\n",
      "train_roc_mean:0.9287276984452131\n",
      "valid_roc_mean:0.8609781369241546\n",
      "\n",
      "EPOCH:\t62\n",
      "train_roc_mean:0.9297878198615016\n",
      "valid_roc_mean:0.8561691024782886\n",
      "\n",
      "EPOCH:\t63\n",
      "train_roc_mean:0.9316816222423264\n",
      "valid_roc_mean:0.8612986230906851\n",
      "\n",
      "EPOCH:\t64\n",
      "train_roc_mean:0.9333312376982897\n",
      "valid_roc_mean:0.8584946688320562\n",
      "\n",
      "EPOCH:\t65\n",
      "train_roc_mean:0.9338786568803347\n",
      "valid_roc_mean:0.8588833704288045\n",
      "\n",
      "EPOCH:\t66\n",
      "train_roc_mean:0.9350715318407707\n",
      "valid_roc_mean:0.8593220739589972\n",
      "\n",
      "EPOCH:\t67\n",
      "train_roc_mean:0.9320028634508956\n",
      "valid_roc_mean:0.8545960879026193\n",
      "\n",
      "EPOCH:\t68\n",
      "train_roc_mean:0.9384820920643357\n",
      "valid_roc_mean:0.8511899696946665\n",
      "\n",
      "EPOCH:\t69\n",
      "train_roc_mean:0.9385332898606155\n",
      "valid_roc_mean:0.8622202668021628\n",
      "\n",
      "EPOCH:\t70\n",
      "train_roc_mean:0.9389180660776598\n",
      "valid_roc_mean:0.8542630681343516\n",
      "\n",
      "EPOCH:\t71\n",
      "train_roc_mean:0.9406505283997548\n",
      "valid_roc_mean:0.8585502132354791\n",
      "\n",
      "EPOCH:\t72\n",
      "train_roc_mean:0.9407716566020711\n",
      "valid_roc_mean:0.853890468228666\n",
      "\n",
      "EPOCH:\t73\n",
      "train_roc_mean:0.9441507419017765\n",
      "valid_roc_mean:0.8542524342420443\n",
      "\n",
      "EPOCH:\t74\n",
      "train_roc_mean:0.9443513617307541\n",
      "valid_roc_mean:0.8587381720219164\n",
      "\n",
      "EPOCH:\t75\n",
      "train_roc_mean:0.9464339809070778\n",
      "valid_roc_mean:0.8602836805418591\n",
      "\n",
      "EPOCH:\t76\n",
      "train_roc_mean:0.9457367795059223\n",
      "valid_roc_mean:0.8600099044725945\n",
      "\n",
      "EPOCH:\t77\n",
      "train_roc_mean:0.9464578177906158\n",
      "valid_roc_mean:0.8532007618553455\n",
      "\n",
      "EPOCH:\t78\n",
      "train_roc_mean:0.9500172337165668\n",
      "valid_roc_mean:0.8545930069210891\n",
      "\n",
      "EPOCH:\t79\n",
      "train_roc_mean:0.9495629289254072\n",
      "valid_roc_mean:0.8576658043865147\n",
      "\n",
      "EPOCH:\t80\n",
      "train_roc_mean:0.9490574474137471\n",
      "valid_roc_mean:0.8567242329554489\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Training driver: evaluate first (so epoch-0 numbers reflect the\n",
    "# untrained model), then train at the end of the loop body; stop early\n",
    "# when neither validation ROC nor validation loss has improved recently.\n",
    "best_param ={}\n",
    "best_param[\"roc_epoch\"] = 0\n",
    "best_param[\"loss_epoch\"] = 0\n",
    "best_param[\"valid_roc\"] = 0\n",
    "best_param[\"valid_loss\"] = 9e8\n",
    "\n",
    "for epoch in range(epochs):    \n",
    "    train_roc, train_loss = eval(model, train_df)\n",
    "    valid_roc, valid_loss = eval(model, valid_df)\n",
    "    train_roc_mean = np.array(train_roc).mean()\n",
    "    valid_roc_mean = np.array(valid_roc).mean()\n",
    "    \n",
    "#     tensorboard.add_scalars('ROC',{'train_roc':train_roc_mean,'valid_roc':valid_roc_mean},epoch)\n",
    "#     tensorboard.add_scalars('Losses',{'train_losses':train_loss,'valid_losses':valid_loss},epoch)\n",
    "\n",
    "    if valid_roc_mean > best_param[\"valid_roc\"]:\n",
    "        best_param[\"roc_epoch\"] = epoch\n",
    "        best_param[\"valid_roc\"] = valid_roc_mean\n",
    "        # Only checkpoint models above this quality bar, to save disk;\n",
    "        # the evaluation cell below loads the checkpoint at roc_epoch.\n",
    "        if valid_roc_mean > 0.85:\n",
    "             torch.save(model, 'saved_models/model_'+prefix_filename+'_'+start_time+'_'+str(epoch)+'.pt')             \n",
    "    if valid_loss < best_param[\"valid_loss\"]:\n",
    "        best_param[\"loss_epoch\"] = epoch\n",
    "        best_param[\"valid_loss\"] = valid_loss\n",
    "\n",
    "    print(\"EPOCH:\\t\"+str(epoch)+'\\n'\\\n",
    "#         +\"train_roc\"+\":\"+str(train_roc)+'\\n'\\\n",
    "#         +\"valid_roc\"+\":\"+str(valid_roc)+'\\n'\\\n",
    "        +\"train_roc_mean\"+\":\"+str(train_roc_mean)+'\\n'\\\n",
    "        +\"valid_roc_mean\"+\":\"+str(valid_roc_mean)+'\\n'\\\n",
    "        )\n",
    "    # Early stopping: ROC stale for more than 10 epochs AND loss stale\n",
    "    # for more than 20 epochs.\n",
    "    if (epoch - best_param[\"roc_epoch\"] >10) and (epoch - best_param[\"loss_epoch\"] >20):        \n",
    "        break\n",
    "        \n",
    "    train(model, train_df, optimizer, loss_function)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "best epoch:69\n",
      "test_roc:[0.8169696450008574, 0.9088834915728269, 0.899473361910594, 0.8324078748107017, 0.73659281894576, 0.8139835375191424, 0.88207928802589, 0.8449413808076421, 0.876528442317916, 0.7674418604651163, 0.9043071161048689, 0.8514150943396227]\n",
      "test_roc_mean: 0.8445853259850783\n"
     ]
    }
   ],
   "source": [
    "# evaluate model\n",
    "# Reload the checkpoint saved at the best-validation-ROC epoch.\n",
    "# NOTE(review): torch.load without map_location assumes the same CUDA\n",
    "# device is available as at save time.\n",
    "best_model = torch.load('saved_models/model_'+prefix_filename+'_'+start_time+'_'+str(best_param[\"roc_epoch\"])+'.pt')     \n",
    "\n",
    "best_model_dict = best_model.state_dict()\n",
    "best_model_wts = copy.deepcopy(best_model_dict)\n",
    "\n",
    "model.load_state_dict(best_model_wts)\n",
    "# Sanity check that the loaded weights match; the result is discarded --\n",
    "# consider wrapping in assert so a mismatch would actually fail.\n",
    "(best_model.align[0].weight == model.align[0].weight).all()\n",
    "test_roc, test_losses = eval(model, test_df)\n",
    "\n",
    "# The final ',' before str(...) makes print insert the space after ':'.\n",
    "print(\"best epoch:\"+str(best_param[\"roc_epoch\"])\n",
    "      +\"\\n\"+\"test_roc:\"+str(test_roc)\n",
    "      +\"\\n\"+\"test_roc_mean:\",str(np.array(test_roc).mean())\n",
    "     )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
