{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "import torch.autograd as autograd\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torch.utils.data as Data\n",
    "torch.manual_seed(8)\n",
    "\n",
    "import time\n",
    "import numpy as np\n",
    "import gc\n",
    "import sys\n",
    "sys.setrecursionlimit(50000)\n",
    "import pickle\n",
    "torch.backends.cudnn.benchmark = True\n",
    "torch.set_default_tensor_type('torch.cuda.FloatTensor')\n",
    "# from tensorboardX import SummaryWriter\n",
    "torch.nn.Module.dump_patches = True\n",
    "import copy\n",
    "import pandas as pd\n",
    "#then import my own modules\n",
    "from AttentiveFP import Fingerprint, Fingerprint_viz, save_smiles_dicts, get_smiles_dicts, get_smiles_array, moltosvg_highlight"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from rdkit import Chem\n",
    "# from rdkit.Chem import AllChem\n",
    "from rdkit.Chem import QED\n",
    "from rdkit.Chem import rdMolDescriptors, MolSurf\n",
    "from rdkit.Chem.Draw import SimilarityMaps\n",
    "from rdkit import Chem\n",
    "from rdkit.Chem import AllChem\n",
    "from rdkit.Chem import rdDepictor\n",
    "from rdkit.Chem.Draw import rdMolDraw2D\n",
    "%matplotlib inline\n",
    "from numpy.polynomial.polynomial import polyfit\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.cm as cm\n",
    "import matplotlib\n",
    "import seaborn as sns; sns.set()\n",
    "from IPython.display import SVG, display\n",
    "import sascorer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "number of all smiles:  29978\n",
      "number of successfully processed smiles:  29978\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAU8AAAC/CAYAAAB+KF5fAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAFE5JREFUeJzt3XtQVOX/B/D3LqgpV7/KF8ZLSS5LKUL9jMELA8lNx6YQMp2pvIwYTRMNSjA6kTo1VpYEKshgmtNoo1PTBSUbGYSGJsVLmkaa7applsOG4rIKEz9gz++Pfqyue39kz+7C+zXjCOc8++zn4YH3nj1nzzkKSZIkEBGRS5SeLoCIyBcxPImIBDA8iYgEMDyJiAQwPImIBDA8iYgEMDyJiAQwPImIBDA8iYgEMDyJiAQwPImIBDA8iYgEMDyJiAT4e7oAud282QGj0XMXkho1KhA3btz22PO700Ad20AdFzBwx+bKuJRKBUaODHD5OQZdeBqNkkfDs6+GgWqgjm2gjgsYuGNz97iE3raXl5cjOjoamZmZFusOHz6MBQsWIDY2FtOnT8fatWthMBgs2nV0dGD9+vVITExEbGwssrOzUV9fb/X5nO2TiEguLoenVqvF9u3bMXr0aIt1x44dQ25uLiIiIlBVVYVVq1ahoaEBubm5MBqNZm3z8vJQU1OD/Px8bNu2DSqVCnl5eWhsbBTuk4hILi69bTcajSguLsZzzz0HjUZjsfW3ceNGREVFYdOmTVAq/83lsLAwLFu2DAcPHsTcuXMBAI2NjThy5AgqKiqQnp4OAJg2bRquXr2KDRs2IDk52eU+iYjk5NKW5yeffIKWlhasXLnSYp1Op0NzczMyMzNNIQcAM2fORHh4OGpra03L6urqEBQUhNTUVNMyhUKBrKwsXLp0CRcuXHC5TyIiOTm95Xn16lVs2bIFJSUlCAwMtFiv0WgAAFFRURbr1Go1tFqt6XutVguVSmUWiAAQHR1t6kulUrnUJ9FA1WMEurp77LYZNsQf/vzgoaycCk9JkvDmm28iMTERaWlpVtvo9XoAQEhIiMW6kJAQnDt3zqzthAkTrLa7uy9X+nTWqFGWwS+3sLAgT5fgNgN1bJ4c199tnTh/6YbdNv8T/V+E/WeEUP+cMzFOhefnn3+OX375Bd9++63DtgqFwqnlttq50tZeH7bcuHHbox/NCAsLQmvrLY89vzsN1LF5elydXT24dfsf+206u9Da2+ty354em7u4Mi6lUiG0UeUwPNva2rBx40a8/PLLGD58uOkgUU9PD4xGIwwGA4YNG4bQ0FAAd7YW79be3m629RgaGmqzHXBnS9OVPomI5ORwL4lOp8OtW7fw4YcfIj4+3vTv1KlT0Gg0iI+PR3l5uWm/pLX9kBqNxmy/pUqlwsWLFy0+atS3j1OtVgOAS30SEcnJ4Zbngw8+iF27dlksf/fdd9HZ2Yn169djzJgxiIiIQExMDGpqarBkyRLTwaCmpibodDpkZGSYHpueno4vvvgCDQ0NZvtQq6urERkZCZVKBQAu9UlEJCeH4RkQEICEhASL5cHBwQBgtq6wsBA5OTkoKCjAwoULodPpUFJSgri4OMyZM8fULjk5GQkJCSguLoZer8e4ceNQXV2NkydPorKy0ux5nO2TiEhO/Xpu+/Tp01FVVYXy8nLk5uYiICAAaWlpKCoqgp+fn6mdQqFAZWUlSktLUVZWBoPBAJVKhYqKCqSkpAj1SUQkJ4UkSQPzqgA28Gi7+wzUsXl6XB1dPTjxq85um/hHwxEwzPVtIU+PzV3kONrOj9USEQlgeBIRCWB4EhEJGHQXQyYi63gOvWsYnkQE4N/gdObAlL/AgamBiK8hREQCGJ5ERAIYnkREAhieREQCGJ5ERAIYnkREAhieREQCGJ5ERAIYnkREAhieREQCGJ5ERAIYnkREAhieREQCeHkUIjfiZd4GLoYn
kRvxMm8DF1/viIgEMDyJiAQwPImIBDA8iYgEMDyJiAQwPImIBDA8iYgEMDyJiAQwPImIBPC0BqIBQKFUoKPL9mmgPAW0/zE8iQaAru5enNG02lwv1ymgg+lcfoYnEfWbwXQu/wDIfyIi+TE8iYgEMDyJiAQwPImIBDA8iYgE+P4hLyJyyNbnQKW2TnT+/3KjJHdVvo3hSTQI2PocaFDgA7h1+x8AQJw6TO6yfBrfthMRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJcBieTU1NWL16NWbPno24uDgkJSUhLy8Pv/32m0Xbw4cPY8GCBYiNjcX06dOxdu1aGAwGi3YdHR1Yv349EhMTERsbi+zsbNTX11t9fmf7JCKSk8Pw3Lt3L65du4alS5di+/btWL16Na5du4b58+fj9OnTpnbHjh1Dbm4uIiIiUFVVhVWrVqGhoQG5ubkwGo1mfebl5aGmpgb5+fnYtm0bVCoV8vLy0NjYaNbOlT6JXNVjBDq6emz+6+GvGNnh8B5G69atw6hRo8yWJSYmIjU1FR9//DHKy8sBABs3bkRUVBQ2bdoEpfLfTA4LC8OyZctw8OBBzJ07FwDQ2NiII0eOoKKiAunp6QCAadOm4erVq9iwYQOSk5NNz+Nsn0Qiurp7cOJXnc318Y+Gw38Yb/NF1jnc8rw3OAEgODgYDz30EFpaWgAAOp0Ozc3NyMzMNIUcAMycORPh4eGora01Laurq0NQUBBSU1NNyxQKBbKysnDp0iVcuHDB5T6JiOQmdMCora0NWq0WUVFRAACNRgMApu/vplarodVqTd9rtVqoVCqzQASA6Ohos75c6ZOISG4uvyeRJAlr1qyB0WhETk4OAECv1wMAQkJCLNqHhITg3Llzpu/1ej0mTJhgtd3dfbnSpytGjQoUelx/CgsL8nQJbuNLY5PaOhEU+IDN9UOHDYHkp8TfbZ2An5/VNsMf8EfQiKHCzwEAI0YMQ9h/RtxXH0OG+NttY29933JHffRXrY766C/u/l10OTw/+OADHDp0CO+99x4mTpxotk6hUFh9zL3LbbVzpa29Puy5ceM2jEZJ6LH9ISwsCK2ttzz2/O7ka2Pr7Oox3bPcmtudXTijaTW7t/m94h8Nxz8dXcLPAQCdnV1o7e29rz66u+23sbX+7rE56qO/anXUR39w5XdRqVQIbVS59La9rKwMO3fuRHFxMbKzs03LQ0NDAdzZWrxbe3u72dZjaGiozXbAnS1NV/okIpKb0+G5efNmVFVVoaioCIsXLzZb17df0tp+SI1GY7bfUqVS4eLFixYfNerbx6lWq13uk4hIbk6FZ0VFBSorK5Gfn4/ly5dbrI+IiEBMTAxqamrMQrGpqQk6nQ4ZGRmmZenp6TAYDGhoaDDro7q6GpGRkVCpVC73SUQkN4f7PHfu3Iny8nLMmjULM2bMMPtg/NChQzFp0iQAQGFhIXJyclBQUICFCxdCp9OhpKQEcXFxmDNnjukxycnJSEhIQHFxMfR6PcaNG4fq6mqcPHkSlZWVZs/tbJ9ERHJzGJ7fffed6f++r/uMHTvWtAU5ffp0VFVVoby8HLm5uQgICEBaWhqKiorgd9eRSoVCgcrKSpSWlqKsrAwGgwEqlQoVFRVISUkx69/ZPomI5OYwPHfv3u10Z0lJSUhKSnLYLjAwEGvXrsXatWv7rU8iIjnxqkpERAJ44i7RfVAoFejo6rG53oMfKSY3Y3gS3Yeu7l6c0bTaXB+nDpOxGpIT37YTEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJ4IVByOf0GIGubttXMgKAYUP84c9NA3Ijhif5nK7uHpz4VWe3Tfyj4fAfxl9vch++NhMRCWB4EhEJYHgSEQlgeBIR
CWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQlgeBIRCWB4EhEJYHgSEQngfQqISFYKpQIdXbbvQeUr959ieBKRrLq6e3FG02pzva/cf8oH8p2IyPt4f7zToOPo1sJGScZiiGxgeJKsnLnnulECTp63fWvhOHVYf5dF5DKGJznNUfBJbZ3oNcLuzn5n7rnOcCRfwPAkpzkKvqDAB/DI+BCf2NlPdL94wIiISADDk4hIAMOTiEgAw5OISADDk4hIAMOTiEgAw5OISADDk4hIAMOTiEgAw5OISADDk4hIAMOTiEgAr+BAJryOJpHzGJ5k4uiqSbxUHNEdPvG2vaOjA+vXr0diYiJiY2ORnZ2N+vp6T5dFRIOYT4RnXl4eampqkJ+fj23btkGlUiEvLw+NjY2eLo2IBimvf9ve2NiII0eOoKKiAunp6QCAadOm4erVq9iwYQOSk5M9XKHnOXNriyH+/ujucXz7CyJPc3RrYsA7bk/s9eFZV1eHoKAgpKammpYpFApkZWVhzZo1uHDhAlQqlQcr9Dxnb21h73avfW2IPM3RrYkB77g9sdeHp1arhUqlglJp/jITHR0NANBoNC6Fp1Kp6Nf6RNxdQ68R+N+eXrvth/r7wc/Oq6y/nxIjHhhit4/+aONo/fBh/vD3U9r9GctRR18bR3Pt7PMMH+aP3h7r7eSo1Z0/s7vH5u21ulIH4PzfumgmKCRJ8uo3a7Nnz8aECROwbds2s+WXL1/G7NmzsW7dOjz//PMeqo6IBiufOGCkUNh+ZbC3jojIXbw+PENDQ6HX6y2Wt7e3AwBCQkLkLomIyPvDU6VS4eLFizAajWbLNRoNAECtVnuiLCIa5Lw+PNPT02EwGNDQ0GC2vLq6GpGRkYP+SDsReYbXH21PTk5GQkICiouLodfrMW7cOFRXV+PkyZOorKz0dHlENEh5/dF2ALh9+zZKS0tRW1sLg8EAlUqFV199FWlpaZ4ujYgGKZ8ITyIib+P1+zyJiLwRw5OISIDXHzDyNS0tLdixYwfOnj2L8+fPo7OzE7t27UJCQoJZu5SUFPz1118Wj3/ppZdQWFgoV7lOa2pqwr59+/DTTz+hpaUFISEhiI2NxWuvvWY6VbbP4cOHsXnzZpw/fx4BAQFIT09HYWEhgoODPVS9bc6Oa9GiRTh+/LjF4+fOnYuysjI5S3baqVOnsHXrVmg0Guj1egQEBECtViMnJ8figjq+NGfOjsvdc8bw7GdXrlzBgQMHMGnSJEybNs3iI1Z3i4+PtwjK8PBwd5coZO/evdDr9Vi6dCkmTpyI69evY8eOHZg/fz52796Nxx57DABw7Ngx5ObmIjU1FStWrMDff/+NkpISaDQa7Nmzx+IaBZ7m7LgAYMKECXj//ffNHj9y5Ei5S3aawWBAZGQksrOzMXr0aBgMBnz22WfIzc1FaWkpnnrqKQC+N2fOjgtw85xJ1K96e3tNX9fV1UlqtVo6evSoRbtZs2ZJr7zyipyl3Zfr169bLGtvb5eeeOIJKS8vz7Ts2WeflTIzM81+Dj/88IOkVqulAwcOyFKrK5wd14svvig988wzcpbmFt3d3VJSUpK0aNEi0zJfmzNrrI3L3XPmXS8pA4C3vUr3l1GjRlksCw4OxkMPPYSWlhYAgE6nQ3NzMzIzM81+DjNnzkR4eDhqa2tlq9dZzoxrIPH390dQUBCGDPn3qkW+OGfW3DsuOQzMv3QfcfToUTz++OOIiYnB008/jT179kDyoU+OtbW1QavVIioqCsCdU2b7vr+bWq2GVquVtT5R946rz++//474+HhMmjQJGRkZqKysRHd3t4eqdJ7RaERPTw90Oh22bNmCy5cvY8mSJQB8e87sjauPO+eM+zw95Mknn0RMTAzGjx8PvV6P/fv346233sLly5fxxhtveLo8hyRJwpo1a2A0GpGTkwMApgu4WLtYS0hICM6dOydrjSKs
jQsApk6dirlz5+Lhhx9GZ2cnDh06hC1btuDs2bPYunWrByt2bMWKFaYtyMDAQGzatAlJSUkAfHvO7I0LkGHO3LZDgOzu87SmoKBAeuSRR6Q///zTzZXdvw0bNkhqtVr68ssvTcv2798vqdVqqbm52aJ9QUGBNGPGDDlLFGJtXLaUlpZKarVaOnHihAyVifvjjz+kM2fOSPX19dLKlSulyZMnSzU1NZIk+fac2RuXLf05Z3zb7kWysrJgNBrx888/e7oUu8rKyrBz504UFxcjOzvbtDw0NBQAbF5C0NsvH2hrXLbMmzcPAHD69Gl3l3Zfxo8fj9jYWKSkpKC0tBSJiYl4++23YTQafXrO7I3Llv6cM4anF+mbdG8+6LR582ZUVVWhqKgIixcvNlvXt9/M2n4yjUZjdb+at7A3Llt8Yb6smTJlCtrb29HW1ubTc3avu8dlS3/OmW/N+gC3b98+KJVKTJkyxdOlWFVRUYHKykrk5+dj+fLlFusjIiIQExODmpoas1f/pqYm6HQ6ZGRkyFmu0xyNy5Z9+/YBAOLi4txVWr+TJAnHjx9HcHAwQkNDfXbO7nXvuGzpzznjASM3OHjwIACgubkZAHDixAncvHkTw4cPR3JyMr755hvU19cjOTkZERERaG9vx/79+3Ho0CHk5ORgzJgxnizfqp07d6K8vByzZs3CjBkzzN72DB06FJMmTQIAFBYWIicnBwUFBVi4cCF0Oh1KSkoQFxeHOXPmeKp8m5wZ148//oiPPvoIGRkZGDt2LDo7O1FfX4+vvvoKc+bMwdSpUz04Attef/11jB07FpMnT8bIkSPR2tqKr7/+GkePHsWaNWvg7//vn7+vzZkz45JjznhVJTe493TFPmPHjkVDQwNOnz6NTZs24cKFC9Dr9RgyZAiio6OxcOFCZGVlyVytc2yd6gbcGVef77//HuXl5aZT/dLS0lBUVOSV+8+cGdeVK1fwzjvv4Pz587h58yaUSiUiIyMxb948LFq0CH5+fjJX7ZxPP/0UNTU1uHz5Mm7duoWgoCDExMTghRdeQEpKillbX5ozZ8Ylx5wxPImIBHCfJxGRAIYnEZEAhicRkQCGJxGRAIYnEZEAhicRkQCGJxGRAIYnEZEAhicRkYD/A0T9FP1N0A/xAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 360x216 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "task_name = 'Photovoltaic efficiency'\n",
    "tasks = ['PCE']\n",
    "\n",
    "raw_filename = \"../data/cep-processed.csv\"\n",
    "feature_filename = raw_filename.replace('.csv','.pickle')\n",
    "filename = raw_filename.replace('.csv','')\n",
    "prefix_filename = raw_filename.split('/')[-1].replace('.csv','')\n",
    "smiles_tasks_df = pd.read_csv(raw_filename)\n",
    "smilesList = smiles_tasks_df.smiles.values\n",
    "print(\"number of all smiles: \",len(smilesList))\n",
    "\n",
    "# Canonicalize every SMILES string. Each molecule is parsed exactly once and\n",
    "# is only recorded after canonicalization succeeds, so the three lists below\n",
    "# always stay the same length (previously `remained_smiles` was appended\n",
    "# before canonicalization, which could desynchronize the lists on failure).\n",
    "atom_num_dist = []\n",
    "remained_smiles = []\n",
    "canonical_smiles_list = []\n",
    "for smiles in smilesList:\n",
    "    mol = Chem.MolFromSmiles(smiles)\n",
    "    if mol is None:  # RDKit returns None for unparseable SMILES\n",
    "        print(smiles)\n",
    "        continue\n",
    "    try:\n",
    "        canonical = Chem.MolToSmiles(mol, isomericSmiles=True)\n",
    "    except Exception:  # keep best-effort behavior, but don't swallow bare\n",
    "        print(smiles)\n",
    "        continue\n",
    "    atom_num_dist.append(len(mol.GetAtoms()))\n",
    "    remained_smiles.append(smiles)\n",
    "    canonical_smiles_list.append(canonical)\n",
    "print(\"number of successfully processed smiles: \", len(remained_smiles))\n",
    "# keep only rows whose SMILES parsed; .copy() avoids SettingWithCopyWarning\n",
    "# on the column assignment below\n",
    "smiles_tasks_df = smiles_tasks_df[smiles_tasks_df[\"smiles\"].isin(remained_smiles)].copy()\n",
    "smiles_tasks_df['cano_smiles'] = canonical_smiles_list\n",
    "\n",
    "# Distribution of molecule sizes (atoms per molecule).\n",
    "plt.figure(figsize=(5, 3))\n",
    "sns.set(font_scale=1.5)\n",
    "ax = sns.distplot(atom_num_dist, bins=28, kde=False)\n",
    "plt.tight_layout()\n",
    "# plt.savefig(\"atom_num_dist_\"+prefix_filename+\".png\",dpi=200)\n",
    "plt.show()\n",
    "plt.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run configuration / hyperparameters.\n",
    "random_seed = 888\n",
    "# timestamp tag for saved models (':' and ' ' are not filename-safe)\n",
    "start_time = str(time.ctime()).replace(':','-').replace(' ','_')\n",
    "\n",
    "batch_size = 200\n",
    "epochs = 800\n",
    "\n",
    "p_dropout= 0.15\n",
    "fingerprint_dim = 200\n",
    "\n",
    "# NOTE: the optimizer cell uses 10**-weight_decay and 10**-learning_rate,\n",
    "# i.e. these two values are negative base-10 exponents, not raw rates.\n",
    "weight_decay = 4.5 # also known as l2_regularization_lambda\n",
    "learning_rate = 3.6\n",
    "radius = 3  # passed to Fingerprint -- presumably atom-level attention radius, TODO confirm\n",
    "T = 1  # passed to Fingerprint -- presumably molecule-level attention steps, TODO confirm\n",
    "per_task_output_units_num = 1 # for regression model\n",
    "output_units_num = len(tasks) * per_task_output_units_num"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>smiles</th>\n",
       "      <th>PCE</th>\n",
       "      <th>cano_smiles</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "Empty DataFrame\n",
       "Columns: [smiles, PCE, cano_smiles]\n",
       "Index: []"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load cached atom/bond feature dictionaries if available, otherwise build\n",
    "# them from the SMILES list and cache them next to the raw csv.\n",
    "if os.path.isfile(feature_filename):\n",
    "    # context manager closes the handle promptly (the original left\n",
    "    # pickle.load(open(...)) unclosed). NOTE: only unpickle files this\n",
    "    # notebook wrote itself -- pickle can execute arbitrary code when\n",
    "    # loading untrusted data.\n",
    "    with open(feature_filename, \"rb\") as f:\n",
    "        feature_dicts = pickle.load(f)\n",
    "else:\n",
    "    feature_dicts = save_smiles_dicts(smilesList,filename)\n",
    "# feature_dicts = get_smiles_dicts(smilesList)\n",
    "# keep only molecules that were successfully featurized\n",
    "remained_df = smiles_tasks_df[smiles_tasks_df[\"cano_smiles\"].isin(feature_dicts['smiles_to_atom_mask'].keys())]\n",
    "uncovered_df = smiles_tasks_df.drop(remained_df.index)\n",
    "uncovered_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold out 20% of the molecules as the test set; the rest is for training.\n",
    "test_df = remained_df.sample(frac=0.2, random_state=random_seed)\n",
    "train_df = remained_df.drop(test_df.index).reset_index(drop=True)\n",
    "test_df = test_df.reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1145405\n",
      "atom_fc.weight torch.Size([200, 39])\n",
      "atom_fc.bias torch.Size([200])\n",
      "neighbor_fc.weight torch.Size([200, 49])\n",
      "neighbor_fc.bias torch.Size([200])\n",
      "GRUCell.0.weight_ih torch.Size([600, 200])\n",
      "GRUCell.0.weight_hh torch.Size([600, 200])\n",
      "GRUCell.0.bias_ih torch.Size([600])\n",
      "GRUCell.0.bias_hh torch.Size([600])\n",
      "GRUCell.1.weight_ih torch.Size([600, 200])\n",
      "GRUCell.1.weight_hh torch.Size([600, 200])\n",
      "GRUCell.1.bias_ih torch.Size([600])\n",
      "GRUCell.1.bias_hh torch.Size([600])\n",
      "GRUCell.2.weight_ih torch.Size([600, 200])\n",
      "GRUCell.2.weight_hh torch.Size([600, 200])\n",
      "GRUCell.2.bias_ih torch.Size([600])\n",
      "GRUCell.2.bias_hh torch.Size([600])\n",
      "align.0.weight torch.Size([1, 400])\n",
      "align.0.bias torch.Size([1])\n",
      "align.1.weight torch.Size([1, 400])\n",
      "align.1.bias torch.Size([1])\n",
      "align.2.weight torch.Size([1, 400])\n",
      "align.2.bias torch.Size([1])\n",
      "attend.0.weight torch.Size([200, 200])\n",
      "attend.0.bias torch.Size([200])\n",
      "attend.1.weight torch.Size([200, 200])\n",
      "attend.1.bias torch.Size([200])\n",
      "attend.2.weight torch.Size([200, 200])\n",
      "attend.2.bias torch.Size([200])\n",
      "mol_GRUCell.weight_ih torch.Size([600, 200])\n",
      "mol_GRUCell.weight_hh torch.Size([600, 200])\n",
      "mol_GRUCell.bias_ih torch.Size([600])\n",
      "mol_GRUCell.bias_hh torch.Size([600])\n",
      "mol_align.weight torch.Size([1, 400])\n",
      "mol_align.bias torch.Size([1])\n",
      "mol_attend.weight torch.Size([200, 200])\n",
      "mol_attend.bias torch.Size([200])\n",
      "output.weight torch.Size([1, 200])\n",
      "output.bias torch.Size([1])\n"
     ]
    }
   ],
   "source": [
    "# Featurize one molecule just to discover the atom/bond feature widths,\n",
    "# which the Fingerprint constructor needs.\n",
    "x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array([canonical_smiles_list[0]],feature_dicts)\n",
    "num_atom_features = x_atom.shape[-1]\n",
    "num_bond_features = x_bonds.shape[-1]\n",
    "loss_function = nn.MSELoss()\n",
    "model = Fingerprint(radius, T, num_atom_features, num_bond_features,\n",
    "            fingerprint_dim, output_units_num, p_dropout)\n",
    "model.cuda()\n",
    "\n",
    "# optimizer = optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)\n",
    "# learning_rate and weight_decay are stored as negative base-10 exponents\n",
    "# (see the config cell), hence the 10**- transform here\n",
    "optimizer = optim.Adam(model.parameters(), 10**-learning_rate, weight_decay=10**-weight_decay)\n",
    "# optimizer = optim.SGD(model.parameters(), 10**-learning_rate, weight_decay=10**-weight_decay)\n",
    "\n",
    "# tensorboard = SummaryWriter(log_dir=\"runs/\"+start_time+\"_\"+prefix_filename+\"_\"+str(fingerprint_dim)+\"_\"+str(p_dropout))\n",
    "\n",
    "# count and list every trainable parameter tensor\n",
    "model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n",
    "params = sum([np.prod(p.size()) for p in model_parameters])\n",
    "print(params)\n",
    "for name, param in model.named_parameters():\n",
    "    if param.requires_grad:\n",
    "        print(name, param.data.shape)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, dataset, optimizer, loss_function):\n",
    "    \"\"\"Run one training epoch over `dataset` in shuffled mini-batches.\n",
    "\n",
    "    Relies on notebook globals: `epoch` (current epoch, seeds the shuffle),\n",
    "    `batch_size`, `tasks`, `feature_dicts` and `get_smiles_array`.\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "    np.random.seed(epoch)  # deterministic shuffle per epoch (global `epoch`)\n",
    "    valList = np.arange(0,dataset.shape[0])\n",
    "    #shuffle them\n",
    "    np.random.shuffle(valList)\n",
    "    batch_list = []\n",
    "    for i in range(0, dataset.shape[0], batch_size):\n",
    "        batch = valList[i:i+batch_size]\n",
    "        batch_list.append(batch)\n",
    "    for counter, batch in enumerate(batch_list):\n",
    "        batch_df = dataset.loc[batch,:]\n",
    "        smiles_list = batch_df.cano_smiles.values\n",
    "        y_val = batch_df[tasks[0]].values\n",
    "        \n",
    "        x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,feature_dicts)\n",
    "        atoms_prediction, mol_prediction = model(torch.Tensor(x_atom),torch.Tensor(x_bonds),torch.cuda.LongTensor(x_atom_index),torch.cuda.LongTensor(x_bond_index),torch.Tensor(x_mask))\n",
    "        \n",
    "        optimizer.zero_grad()\n",
    "        loss = loss_function(mol_prediction, torch.Tensor(y_val).view(-1,1))\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "def eval(model, dataset):\n",
    "    \"\"\"Return (mean MAE, mean MSE) of `model` over `dataset`.\n",
    "\n",
    "    NOTE: shadows the builtin `eval`; name kept because later cells call it.\n",
    "    Per-sample errors are accumulated so the mean is exact even when the\n",
    "    last batch is smaller than `batch_size`.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    test_MAE_list = []\n",
    "    test_MSE_list = []\n",
    "    valList = np.arange(0,dataset.shape[0])\n",
    "    batch_list = []\n",
    "    for i in range(0, dataset.shape[0], batch_size):\n",
    "        batch = valList[i:i+batch_size]\n",
    "        batch_list.append(batch)\n",
    "    # no gradients are needed for evaluation; skipping autograd bookkeeping\n",
    "    # saves memory and time\n",
    "    with torch.no_grad():\n",
    "        for counter, batch in enumerate(batch_list):\n",
    "            batch_df = dataset.loc[batch,:]\n",
    "            smiles_list = batch_df.cano_smiles.values\n",
    "            y_val = batch_df[tasks[0]].values\n",
    "            \n",
    "            x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(smiles_list,feature_dicts)\n",
    "            atoms_prediction, mol_prediction = model(torch.Tensor(x_atom),torch.Tensor(x_bonds),torch.cuda.LongTensor(x_atom_index),torch.cuda.LongTensor(x_bond_index),torch.Tensor(x_mask))\n",
    "            MAE = F.l1_loss(mol_prediction, torch.Tensor(y_val).view(-1,1), reduction='none')\n",
    "            MSE = F.mse_loss(mol_prediction, torch.Tensor(y_val).view(-1,1), reduction='none')\n",
    "            \n",
    "            test_MAE_list.extend(MAE.data.squeeze().cpu().numpy())\n",
    "            test_MSE_list.extend(MSE.data.squeeze().cpu().numpy())\n",
    "    return np.array(test_MAE_list).mean(), np.array(test_MSE_list).mean()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 27.15669 26.759457\n",
      "1 4.7991877 4.621017\n",
      "2 3.2770875 3.040153\n",
      "3 2.602797 2.3623934\n",
      "4 2.7828069 2.5239089\n",
      "5 2.476531 2.249524\n",
      "6 2.3250942 2.104203\n",
      "7 2.4555514 2.238593\n",
      "8 2.2959468 2.0666234\n",
      "9 2.2373269 2.0062492\n",
      "10 2.319619 2.0734603\n",
      "11 2.1590562 1.917183\n",
      "12 2.0391316 1.8026553\n",
      "13 2.0410283 1.8109373\n",
      "14 1.9755106 1.7508795\n",
      "15 1.891383 1.6648682\n",
      "16 1.9350168 1.7146838\n",
      "17 1.804558 1.5876269\n",
      "18 1.794932 1.5847195\n",
      "19 1.8521775 1.6302102\n",
      "20 1.7242821 1.5270039\n",
      "21 1.8371214 1.6145465\n",
      "22 1.6477945 1.4541367\n",
      "23 1.6145102 1.4288635\n",
      "24 1.5849146 1.3988746\n",
      "25 1.6273644 1.423822\n",
      "26 1.5235523 1.3344448\n",
      "27 1.64542 1.4477166\n",
      "28 1.5082928 1.3092469\n",
      "29 1.463373 1.2903223\n",
      "30 1.4622312 1.2901535\n",
      "31 1.4488202 1.2921972\n",
      "32 1.3834699 1.2081531\n",
      "33 1.3543432 1.1837801\n",
      "34 1.3686943 1.2112132\n",
      "35 1.3931277 1.2270683\n",
      "36 1.3081926 1.1670799\n",
      "37 1.2985831 1.1536529\n",
      "38 1.4471424 1.318887\n",
      "39 1.4146317 1.2342101\n",
      "40 1.2012727 1.0946933\n",
      "41 1.186057 1.0632837\n",
      "42 1.219709 1.0991125\n",
      "43 1.2272531 1.105064\n",
      "44 1.2387722 1.1112021\n",
      "45 1.2059183 1.0740219\n",
      "46 1.1114184 1.0433081\n",
      "47 1.1290859 1.0441196\n",
      "48 1.0909114 1.0152723\n",
      "49 1.1661289 1.0528119\n",
      "50 1.0637543 0.99233896\n",
      "51 1.0870022 1.0184348\n",
      "52 1.043846 0.99298537\n",
      "53 0.99555445 0.9662101\n",
      "54 0.9984948 0.9660103\n",
      "55 0.97517085 0.9542891\n",
      "56 1.0253409 0.9939455\n",
      "57 0.9909448 1.0017823\n",
      "58 0.93733394 0.9621734\n",
      "59 1.0412904 0.9885154\n",
      "60 0.9309326 0.95500404\n",
      "61 1.0459865 1.0538589\n",
      "62 0.9368447 0.9492134\n",
      "63 1.0184581 1.0619771\n",
      "64 0.9448913 0.9446173\n",
      "65 0.86581635 0.9014623\n",
      "66 0.86538374 0.9175826\n",
      "67 0.8485681 0.92073125\n",
      "68 0.89167875 0.9932622\n",
      "69 0.8554769 0.922746\n",
      "70 0.8070066 0.89915925\n",
      "71 0.81967425 0.8939424\n",
      "72 0.7833921 0.8695017\n",
      "73 0.76880854 0.86843\n",
      "74 0.7473362 0.85108495\n",
      "75 0.7507556 0.84176135\n",
      "76 0.7770429 0.86160654\n",
      "77 0.7319464 0.88463753\n",
      "78 0.72749615 0.87530667\n",
      "79 0.70294654 0.8628179\n",
      "80 0.6795799 0.85910106\n",
      "81 0.74708474 0.92938226\n",
      "82 0.6486463 0.8225956\n",
      "83 0.6631697 0.8150859\n",
      "84 0.6419646 0.8278555\n",
      "85 0.68731415 0.8656795\n",
      "86 0.6410482 0.79260993\n",
      "87 0.6555627 0.803408\n",
      "88 0.618638 0.8107156\n",
      "89 0.5766453 0.787682\n",
      "90 0.57536 0.7730598\n",
      "91 0.5990949 0.79473406\n",
      "92 0.6157485 0.8175044\n",
      "93 0.5629281 0.7538691\n",
      "94 0.57936466 0.82498246\n",
      "95 0.56379044 0.83117783\n",
      "96 0.528298 0.7758802\n",
      "97 0.5363541 0.8193125\n",
      "98 0.6231807 0.88344413\n",
      "99 0.4871565 0.77467215\n",
      "100 0.5173492 0.80408806\n",
      "101 0.59408826 0.88925934\n",
      "102 0.4806829 0.75249684\n",
      "103 0.48374397 0.772358\n",
      "104 0.49087018 0.79808265\n",
      "105 0.56004494 0.77125883\n",
      "106 0.4868442 0.80220604\n",
      "107 0.48188132 0.7594041\n",
      "108 0.502161 0.79074126\n",
      "109 0.49296734 0.7896659\n",
      "110 0.43724382 0.7902456\n",
      "111 0.44128066 0.7890351\n",
      "112 0.44503137 0.77589\n",
      "113 0.4138892 0.76096433\n",
      "114 0.3994712 0.7909683\n",
      "115 0.40142024 0.7744389\n",
      "116 0.43340716 0.76908857\n",
      "117 0.396297 0.7348004\n",
      "118 0.38608494 0.76469874\n",
      "119 0.43558303 0.7541787\n",
      "120 0.5564166 0.77617335\n",
      "121 0.39569944 0.76486814\n",
      "122 0.3811065 0.736434\n",
      "123 0.417991 0.7564122\n",
      "124 0.36462662 0.74704695\n",
      "125 0.34782964 0.70915896\n",
      "126 0.37518218 0.797148\n",
      "127 0.38740963 0.79485625\n",
      "128 0.35327497 0.809082\n",
      "129 0.35470128 0.766439\n",
      "130 0.4281777 0.76962894\n",
      "131 0.34566724 0.7480376\n",
      "132 0.33605942 0.7331934\n",
      "133 0.33857563 0.77424794\n",
      "134 0.34033918 0.8244019\n",
      "135 0.3048292 0.75581086\n",
      "136 0.32151294 0.7516069\n",
      "137 0.34446204 0.7457818\n",
      "138 0.3201697 0.7110824\n",
      "139 0.3140837 0.7728491\n",
      "140 0.31666112 0.78044426\n",
      "141 0.32965833 0.75096303\n",
      "142 0.29528975 0.7208563\n",
      "143 0.29746634 0.76241255\n",
      "144 0.31057408 0.72752905\n"
     ]
    }
   ],
   "source": [
    "# Training loop with simple early stopping; checkpoints the model whenever\n",
    "# the test MSE improves past the 0.9 threshold.\n",
    "best_param ={}\n",
    "best_param[\"train_epoch\"] = 0\n",
    "best_param[\"test_epoch\"] = 0\n",
    "best_param[\"train_MSE\"] = 9e8\n",
    "best_param[\"test_MSE\"] = 9e8\n",
    "\n",
    "for epoch in range(epochs):  # use the config-cell constant (was hard-coded 800)\n",
    "    train_MAE, train_MSE = eval(model, train_df)\n",
    "    test_MAE, test_MSE = eval(model, test_df)\n",
    "    if train_MSE < best_param[\"train_MSE\"]:\n",
    "        best_param[\"train_epoch\"] = epoch\n",
    "        best_param[\"train_MSE\"] = train_MSE\n",
    "    if test_MSE < best_param[\"test_MSE\"]:\n",
    "        best_param[\"test_epoch\"] = epoch\n",
    "        best_param[\"test_MSE\"] = test_MSE\n",
    "        # NOTE: a checkpoint is written only when test MSE < 0.9; if the best\n",
    "        # epoch never crosses that threshold, the reload cell below will fail.\n",
    "        if test_MSE < 0.9:\n",
    "             torch.save(model, 'saved_models/model_'+prefix_filename+'_'+start_time+'_'+str(epoch)+'.pt')\n",
    "    # stop once train MSE is stale for >2 epochs AND test MSE for >18\n",
    "    if (epoch - best_param[\"train_epoch\"] >2) and (epoch - best_param[\"test_epoch\"] >18):        \n",
    "        break\n",
    "    print(epoch, train_MSE, test_MSE)\n",
    "    \n",
    "    train(model, train_df, optimizer, loss_function)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "best epoch: 125 \n",
      " test MSE: 0.70915896\n"
     ]
    }
   ],
   "source": [
    "# Reload the checkpoint saved at the best test epoch and evaluate it.\n",
    "model_path = 'saved_models/model_' + prefix_filename + '_' + start_time + '_' + str(best_param[\"test_epoch\"]) + '.pt'\n",
    "best_model = torch.load(model_path)\n",
    "test_MAE, test_MSE = eval(best_model, test_df)\n",
    "print(\"best epoch:\", best_param[\"test_epoch\"], \"\\n\", \"test MSE:\", test_MSE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
