{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load Python Modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Silence all warnings notebook-wide (sklearn/deepchem emit noisy\n",
    "# deprecation warnings on import; see the stderr of the next cell).\n",
    "import warnings\n",
    "warnings.simplefilter(action='ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/pylon5/ac5616p/yuke/anaconda3/envs/MolEnv2/lib/python3.6/site-packages/sklearn/externals/joblib/__init__.py:15: DeprecationWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n",
      "  warnings.warn(msg, category=DeprecationWarning)\n"
     ]
    }
   ],
   "source": [
    "from __future__ import print_function\n",
    "from __future__ import division\n",
    "from __future__ import unicode_literals\n",
    "\n",
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import shutil\n",
    "import deepchem as dc\n",
    "import json\n",
    "\n",
    "from rdkit import Chem\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from  sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from tqdm import tnrange, tqdm_notebook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Switch to the project root so the relative data/experiment paths below\n",
    "# resolve. NOTE(review): hardcoded absolute path ties the notebook to one\n",
    "# machine — consider a configurable project root.\n",
    "# NOTE(review): wildcard import hides provenance; load_model and\n",
    "# smiles2mean used later presumably come from `decode` — prefer explicit\n",
    "# imports.\n",
    "os.chdir('/home/yuke/PythonProject/DrugEmbedding/')\n",
    "from decode import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_sider(featurizer='ECFP', split='index', seed=0):\n",
    "    \"\"\"Load, featurize, balance-transform, and split the SIDER dataset.\n",
    "\n",
    "    Args:\n",
    "        featurizer: 'ECFP' (1024-bit circular fingerprint) or 'GraphConv'.\n",
    "        split: one of 'index', 'random', 'scaffold'.\n",
    "        seed: random seed forwarded to the splitter.\n",
    "\n",
    "    Returns:\n",
    "        (task_names, (train, valid, test), transformers)\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if `featurizer` or `split` is not a recognized name.\n",
    "    \"\"\"\n",
    "    # Validate arguments up front so a typo fails immediately with a clear\n",
    "    # message, instead of a cryptic error after the expensive featurization\n",
    "    # (previously an unknown split only raised KeyError at the very end).\n",
    "    if split not in ('index', 'random', 'scaffold'):\n",
    "        raise ValueError(\"Unknown split: %s\" % split)\n",
    "\n",
    "    # Load SIDER dataset\n",
    "    print(\"About to load SIDER dataset.\")\n",
    "    dataset_file = os.path.join(\"./data/sider/deepchem/sider.csv.gz\")\n",
    "    dataset = dc.utils.save.load_from_disk(dataset_file)\n",
    "    print(\"Columns of dataset: %s\" % str(dataset.columns.values))\n",
    "    print(\"Number of examples in dataset: %s\" % str(dataset.shape[0]))\n",
    "\n",
    "    # Featurize SIDER dataset. Use a separate variable instead of shadowing\n",
    "    # the `featurizer` argument, and reject unknown names instead of\n",
    "    # silently passing the raw string through to CSVLoader.\n",
    "    print(\"About to featurize SIDER dataset.\")\n",
    "    if featurizer == 'ECFP':\n",
    "        featurizer_obj = dc.feat.CircularFingerprint(size=1024)\n",
    "    elif featurizer == 'GraphConv':\n",
    "        featurizer_obj = dc.feat.ConvMolFeaturizer()\n",
    "    else:\n",
    "        raise ValueError(\"Unknown featurizer: %s\" % featurizer)\n",
    "\n",
    "    # First column is 'smiles'; the remaining columns are the task labels.\n",
    "    SIDER_tasks = dataset.columns.values[1:].tolist()\n",
    "    print(\"SIDER tasks: %s\" % str(SIDER_tasks))\n",
    "    print(\"%d tasks in total\" % len(SIDER_tasks))\n",
    "\n",
    "    loader = dc.data.CSVLoader(\n",
    "      tasks=SIDER_tasks, smiles_field=\"smiles\", featurizer=featurizer_obj)\n",
    "    dataset = loader.featurize(dataset_file)\n",
    "    print(\"%d datapoints in SIDER dataset\" % len(dataset))\n",
    "\n",
    "    # Re-weight examples so each task's classes are balanced.\n",
    "    transformers = [\n",
    "      dc.trans.BalancingTransformer(transform_w=True, dataset=dataset)]\n",
    "    print(\"About to transform data\")\n",
    "    for transformer in transformers:\n",
    "        dataset = transformer.transform(dataset)\n",
    "\n",
    "    splitters = {'index': dc.splits.IndexSplitter(),\n",
    "                 'random': dc.splits.RandomSplitter(),\n",
    "                 'scaffold': dc.splits.ScaffoldSplitter()}\n",
    "    splitter = splitters[split]\n",
    "    train, valid, test = splitter.train_valid_test_split(dataset, seed=seed)\n",
    "\n",
    "    return SIDER_tasks, (train, valid, test), transformers"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 435,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global seed controlling the random train/valid/test split below.\n",
    "# Seeds previously tried in earlier runs:\n",
    "#seed = 100, 99, 98, 97, 95\n",
    "seed = 101\n",
    "np.random.seed(seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 436,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'data_dir': './data/fda_drugs',\n",
       " 'data_file': 'smiles_set_clean.smi',\n",
       " 'fda_file': 'all_drugs.smi',\n",
       " 'vocab_file': 'char_set_clean.pkl',\n",
       " 'atc_sim_file': 'drugs_sp_all.csv',\n",
       " 'checkpoint_dir': './experiments/KDD',\n",
       " 'experiment_name': 'kdd_010',\n",
       " 'task': 'vae + atc',\n",
       " 'limit': 0,\n",
       " 'batch_size': 128,\n",
       " 'epochs': 100,\n",
       " 'max_sequence_length': 120,\n",
       " 'learning_rate': 0.0003,\n",
       " 'max_norm': 1000000000000.0,\n",
       " 'wd': 0.0,\n",
       " 'manifold_type': 'Lorentz',\n",
       " 'prior_type': 'Standard',\n",
       " 'num_centroids': 0,\n",
       " 'bidirectional': False,\n",
       " 'num_layers': 1,\n",
       " 'hidden_size': 512,\n",
       " 'latent_size': 64,\n",
       " 'word_dropout_rate': 0.2,\n",
       " 'anneal_function': 'logistic',\n",
       " 'k': 0.51,\n",
       " 'x0': 29.0,\n",
       " 'C': 1.0,\n",
       " 'num_workers': 4,\n",
       " 'logging_steps': 1,\n",
       " 'save_per_epochs': 10,\n",
       " 'new_training': False,\n",
       " 'new_annealing': False,\n",
       " 'checkpoint': 'checkpoint_epoch110.model',\n",
       " 'trained_epochs': 110,\n",
       " 'alpha': 0.0,\n",
       " 'beta': 0.015625,\n",
       " 'gamma': 0.0,\n",
       " 'delta': 11.0,\n",
       " 'nneg': 11,\n",
       " 'fda_prop': 0.2}"
      ]
     },
     "execution_count": 436,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the Lorentz-model experiment config and point it at the chosen\n",
    "# checkpoint.\n",
    "exp_dir = './experiments/KDD/kdd_010'\n",
    "checkpoint = 'checkpoint_epoch110.model'\n",
    "# Alternative experiment:\n",
    "#exp_dir = './experiments/EXP_TASK/exp_task_010'\n",
    "#checkpoint = 'checkpoint_epoch100.model'\n",
    "config_path = os.path.join(exp_dir, 'configs.json')\n",
    "checkpoint_path = os.path.join(exp_dir, checkpoint)\n",
    "\n",
    "# The `with` block closes the file on exit; the previous explicit\n",
    "# fp.close() after the block was redundant (the file was already closed).\n",
    "with open(config_path, 'r') as fp:\n",
    "    configs = json.load(fp)\n",
    "\n",
    "configs['checkpoint'] = checkpoint\n",
    "configs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 437,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "HVAE(\n",
      "  (encoder_rnn): GRU(49, 512, batch_first=True)\n",
      "  (decoder_rnn): GRU(49, 512, batch_first=True)\n",
      "  (hidden2mean): Linear(in_features=512, out_features=64, bias=True)\n",
      "  (hidden2logv): Linear(in_features=512, out_features=64, bias=True)\n",
      "  (latent2hidden): Linear(in_features=65, out_features=512, bias=True)\n",
      "  (outputs2vocab): Linear(in_features=512, out_features=49, bias=True)\n",
      "  (RECON): NLLLoss()\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "model = load_model(configs)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 438,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'data_dir': './data/fda_drugs',\n",
       " 'data_file': 'smiles_set_clean.smi',\n",
       " 'fda_file': 'all_drugs.smi',\n",
       " 'vocab_file': 'char_set_clean.pkl',\n",
       " 'atc_sim_file': 'drugs_sp_all.csv',\n",
       " 'checkpoint_dir': './experiments/KDD',\n",
       " 'experiment_name': 'kdd_009',\n",
       " 'task': 'vae + atc',\n",
       " 'limit': 0,\n",
       " 'batch_size': 128,\n",
       " 'epochs': 100,\n",
       " 'max_sequence_length': 120,\n",
       " 'learning_rate': 0.0003,\n",
       " 'max_norm': 1000000000000.0,\n",
       " 'wd': 0.0,\n",
       " 'manifold_type': 'Euclidean',\n",
       " 'prior_type': 'Standard',\n",
       " 'num_centroids': 0,\n",
       " 'bidirectional': False,\n",
       " 'num_layers': 1,\n",
       " 'hidden_size': 512,\n",
       " 'latent_size': 64,\n",
       " 'word_dropout_rate': 0.2,\n",
       " 'anneal_function': 'logistic',\n",
       " 'k': 0.51,\n",
       " 'x0': 29.0,\n",
       " 'C': 1.0,\n",
       " 'num_workers': 4,\n",
       " 'logging_steps': 1,\n",
       " 'save_per_epochs': 10,\n",
       " 'new_training': False,\n",
       " 'new_annealing': False,\n",
       " 'checkpoint': 'checkpoint_epoch110.model',\n",
       " 'trained_epochs': 110,\n",
       " 'alpha': 0.0,\n",
       " 'beta': 0.015625,\n",
       " 'gamma': 0.0,\n",
       " 'delta': 11.0,\n",
       " 'nneg': 11,\n",
       " 'fda_prop': 0.2}"
      ]
     },
     "execution_count": 438,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the Euclidean-model experiment config and point it at the chosen\n",
    "# checkpoint.\n",
    "exp_dir = './experiments/KDD/kdd_009'\n",
    "checkpoint = 'checkpoint_epoch110.model'\n",
    "# Alternative experiment:\n",
    "#exp_dir = './experiments/EXP_TASK/exp_task_009'\n",
    "#checkpoint = 'checkpoint_epoch100.model'\n",
    "config_path = os.path.join(exp_dir, 'configs.json')\n",
    "checkpoint_path = os.path.join(exp_dir, checkpoint)\n",
    "\n",
    "# The `with` block closes the file on exit; the previous explicit\n",
    "# fp.close() after the block was redundant (the file was already closed).\n",
    "with open(config_path, 'r') as fp:\n",
    "    configs_e = json.load(fp)\n",
    "\n",
    "configs_e['checkpoint'] = checkpoint\n",
    "configs_e"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 439,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "EVAE(\n",
      "  (encoder_rnn): GRU(49, 512, batch_first=True)\n",
      "  (decoder_rnn): GRU(49, 512, batch_first=True)\n",
      "  (hidden2mean): Linear(in_features=512, out_features=64, bias=True)\n",
      "  (hidden2logv): Linear(in_features=512, out_features=64, bias=True)\n",
      "  (latent2hidden): Linear(in_features=64, out_features=512, bias=True)\n",
      "  (outputs2vocab): Linear(in_features=512, out_features=49, bias=True)\n",
      "  (RECON): NLLLoss()\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "model_e = load_model(configs_e)\n",
    "print(model_e)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Process Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 440,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "About to load SIDER dataset.\n",
      "Columns of dataset: ['smiles' 'Hepatobiliary disorders' 'Metabolism and nutrition disorders'\n",
      " 'Product issues' 'Eye disorders' 'Investigations'\n",
      " 'Musculoskeletal and connective tissue disorders'\n",
      " 'Gastrointestinal disorders' 'Social circumstances'\n",
      " 'Immune system disorders' 'Reproductive system and breast disorders'\n",
      " 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)'\n",
      " 'General disorders and administration site conditions'\n",
      " 'Endocrine disorders' 'Surgical and medical procedures'\n",
      " 'Vascular disorders' 'Blood and lymphatic system disorders'\n",
      " 'Skin and subcutaneous tissue disorders'\n",
      " 'Congenital, familial and genetic disorders'\n",
      " 'Infections and infestations'\n",
      " 'Respiratory, thoracic and mediastinal disorders' 'Psychiatric disorders'\n",
      " 'Renal and urinary disorders'\n",
      " 'Pregnancy, puerperium and perinatal conditions'\n",
      " 'Ear and labyrinth disorders' 'Cardiac disorders'\n",
      " 'Nervous system disorders'\n",
      " 'Injury, poisoning and procedural complications']\n",
      "Number of examples in dataset: 1427\n",
      "About to featurize SIDER dataset.\n",
      "SIDER tasks: ['Hepatobiliary disorders', 'Metabolism and nutrition disorders', 'Product issues', 'Eye disorders', 'Investigations', 'Musculoskeletal and connective tissue disorders', 'Gastrointestinal disorders', 'Social circumstances', 'Immune system disorders', 'Reproductive system and breast disorders', 'Neoplasms benign, malignant and unspecified (incl cysts and polyps)', 'General disorders and administration site conditions', 'Endocrine disorders', 'Surgical and medical procedures', 'Vascular disorders', 'Blood and lymphatic system disorders', 'Skin and subcutaneous tissue disorders', 'Congenital, familial and genetic disorders', 'Infections and infestations', 'Respiratory, thoracic and mediastinal disorders', 'Psychiatric disorders', 'Renal and urinary disorders', 'Pregnancy, puerperium and perinatal conditions', 'Ear and labyrinth disorders', 'Cardiac disorders', 'Nervous system disorders', 'Injury, poisoning and procedural complications']\n",
      "27 tasks in total\n",
      "Loading raw samples now.\n",
      "shard_size: 8192\n",
      "About to start loading CSV from ./data/sider/deepchem/sider.csv.gz\n",
      "Loading shard 1 of size 8192.\n",
      "Featurizing sample 0\n",
      "Featurizing sample 1000\n",
      "TIMING: featurizing shard 0 took 6.886 s\n",
      "TIMING: dataset construction took 6.951 s\n",
      "Loading dataset from disk.\n",
      "1427 datapoints in SIDER dataset\n",
      "About to transform data\n",
      "TIMING: dataset construction took 0.069 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.062 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.031 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.030 s\n",
      "Loading dataset from disk.\n"
     ]
    }
   ],
   "source": [
    "sider_tasks, datasets, transformers = load_sider(split='random', seed=seed)\n",
    "train_dataset, valid_dataset, test_dataset = datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 441,
   "metadata": {},
   "outputs": [],
   "source": [
    "# side effects labels\n",
    "train_y = train_dataset.y\n",
    "valid_y = valid_dataset.y\n",
    "test_y = test_dataset.y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 442,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Finger Print\n",
    "train_fp = train_dataset.X\n",
    "valid_fp = valid_dataset.X\n",
    "test_fp = test_dataset.X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 443,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_smi = train_dataset.ids\n",
    "valid_smi = valid_dataset.ids\n",
    "test_smi = test_dataset.ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 444,
   "metadata": {},
   "outputs": [],
   "source": [
    "def smi2mean(configs, model, smi_lst):\n",
    "    \"\"\"Encode a list of SMILES strings into latent mean vectors.\n",
    "\n",
    "    Each SMILES is canonicalized with RDKit when possible (falling back\n",
    "    to the raw string on failure), then encoded via `smiles2mean`.\n",
    "\n",
    "    Returns:\n",
    "        np.ndarray of shape (len(smi_lst), latent_dim).\n",
    "    \"\"\"\n",
    "    mean_lst = []\n",
    "    for i in tnrange(len(smi_lst)):\n",
    "        smi = smi_lst[i]\n",
    "        # Convert to canonical form; keep the original string if RDKit\n",
    "        # cannot parse it.\n",
    "        try:\n",
    "            smi_can = Chem.MolToSmiles(Chem.MolFromSmiles(smi))\n",
    "        except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt\n",
    "            smi_can = smi\n",
    "        mean, _ = smiles2mean(configs, smi_can, model)\n",
    "        mean_lst.append(mean.squeeze().cpu().detach().numpy())\n",
    "    return np.array(mean_lst)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 445,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c98ccc4eaada46deb8190534b505f547",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=1141), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "train_mu = smi2mean(configs, model, train_smi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 446,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3c761620e6fc45df941a792cdf8cc47c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=143), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "valid_mu = smi2mean(configs, model, valid_smi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 447,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6a305c8745b94c24a195e5636195748a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=143), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "test_mu = smi2mean(configs, model, test_smi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 448,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "deb6606343da4a2ebd7e1b6b24057642",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=1141), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "train_mu_e = smi2mean(configs_e, model_e, train_smi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 449,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ec42f48ab9f94690a2dafc7f3dd0e250",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=143), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "valid_mu_e = smi2mean(configs_e, model_e, valid_smi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 450,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8612b905dcbc43bc82ad9f36d96bfd0c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=143), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "test_mu_e = smi2mean(configs_e, model_e, test_smi)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Custom Distance Functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 451,
   "metadata": {},
   "outputs": [],
   "source": [
    "def lor_dist(z1, z2):\n",
    "    m = z1*z2\n",
    "    lor_prod = m[1:].sum() - m[0]\n",
    "    x = - lor_prod\n",
    "    x = np.where(x<1.0, 1.0+1e-6, x)\n",
    "    return np.log(x + np.sqrt(x**2 - 1))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# KNN Classifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 452,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_scores(y_true, y_pred):\n",
    "    \"\"\"Per-task ROC-AUC scores for a multi-task label matrix.\n",
    "\n",
    "    Scores each column of (n_samples, n_tasks) `y_pred` against the\n",
    "    matching column of `y_true` and returns a 1-D array of length n_tasks.\n",
    "    \"\"\"\n",
    "    n_data, n_dim = y_true.shape\n",
    "    return np.array([\n",
    "        roc_auc_score(y_true[:, task], y_pred[:, task])\n",
    "        for task in range(n_dim)\n",
    "    ])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 456,
   "metadata": {},
   "outputs": [],
   "source": [
    "# KNN hyperparameters shared by all three classifiers below:\n",
    "# 11 neighbors, votes weighted by inverse distance.\n",
    "N_val = 11\n",
    "W_val = 'distance'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Fit and Predict"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Finger Prints"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 457,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "KNeighborsClassifier(algorithm='brute', leaf_size=30, metric='rogerstanimoto',\n",
       "                     metric_params=None, n_jobs=None, n_neighbors=11, p=2,\n",
       "                     weights='distance')"
      ]
     },
     "execution_count": 457,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Finger Print Representations\n",
    "# Brute-force KNN with the Rogers-Tanimoto dissimilarity, a metric for\n",
    "# the binary ECFP fingerprint vectors; multi-task fit via 2-D train_y.\n",
    "neigh_fp = KNeighborsClassifier(n_neighbors=N_val, algorithm='brute', metric='rogerstanimoto', weights=W_val)\n",
    "neigh_fp.fit(train_fp, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 458,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba returns one (n_samples, 2) array per task; keep the\n",
    "# positive-class (index 1) column of each and stack them into an\n",
    "# (n_samples, n_tasks) matrix aligned with test_y.\n",
    "pred_fp = neigh_fp.predict_proba(test_fp)\n",
    "\n",
    "n_data, n_dim = test_y.shape\n",
    "test_y_pred_fp = np.column_stack([pred_fp[j][:, 1] for j in range(n_dim)])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Lorentz Embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 459,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "KNeighborsClassifier(algorithm='brute', leaf_size=30,\n",
       "                     metric=<function lor_dist at 0x7ef0981f8ae8>,\n",
       "                     metric_params=None, n_jobs=None, n_neighbors=11, p=2,\n",
       "                     weights='distance')"
      ]
     },
     "execution_count": 459,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Brute-force KNN on the hyperbolic embeddings, using the custom\n",
    "# Lorentz geodesic distance defined above as a callable metric.\n",
    "neigh_lor = KNeighborsClassifier(n_neighbors=N_val, algorithm='brute', metric=lor_dist, weights=W_val)\n",
    "neigh_lor.fit(train_mu, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 460,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba returns one (n_samples, 2) array per task; keep the\n",
    "# positive-class (index 1) column of each and stack them into an\n",
    "# (n_samples, n_tasks) matrix aligned with test_y.\n",
    "pred_lor = neigh_lor.predict_proba(test_mu)\n",
    "\n",
    "n_data, n_dim = test_y.shape\n",
    "test_y_pred_lor = np.column_stack([pred_lor[j][:, 1] for j in range(n_dim)])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Euclidean Embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 461,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "KNeighborsClassifier(algorithm='brute', leaf_size=30, metric='minkowski',\n",
       "                     metric_params=None, n_jobs=None, n_neighbors=11, p=2,\n",
       "                     weights='distance')"
      ]
     },
     "execution_count": 461,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# KNN on the Euclidean embeddings; no metric given, so the default\n",
    "# minkowski with p=2 (i.e. Euclidean distance) is used — see the repr.\n",
    "neigh_euc = KNeighborsClassifier(n_neighbors=N_val, algorithm='brute', weights=W_val)\n",
    "neigh_euc.fit(train_mu_e, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 462,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba returns one (n_samples, 2) array per task; keep the\n",
    "# positive-class (index 1) column of each and stack them into an\n",
    "# (n_samples, n_tasks) matrix aligned with test_y.\n",
    "pred_euc = neigh_euc.predict_proba(test_mu_e)\n",
    "\n",
    "n_data, n_dim = test_y.shape\n",
    "test_y_pred_euc = np.column_stack([pred_euc[j][:, 1] for j in range(n_dim)])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 463,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6207228576607152"
      ]
     },
     "execution_count": 463,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_scores(test_y, test_y_pred_fp).mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 464,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6339681634232448"
      ]
     },
     "execution_count": 464,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_scores(test_y, test_y_pred_lor).mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 465,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.7001248119569888"
      ]
     },
     "execution_count": 465,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_scores(test_y, test_y_pred_euc).mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RF"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Finger Print"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 466,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "RandomForestClassifier(bootstrap=True, class_weight='balanced',\n",
       "                       criterion='gini', max_depth=None, max_features='auto',\n",
       "                       max_leaf_nodes=None, min_impurity_decrease=0.0,\n",
       "                       min_impurity_split=None, min_samples_leaf=1,\n",
       "                       min_samples_split=2, min_weight_fraction_leaf=0.0,\n",
       "                       n_estimators=500, n_jobs=None, oob_score=False,\n",
       "                       random_state=None, verbose=0, warm_start=False)"
      ]
     },
     "execution_count": 466,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Multi-output random forest on fingerprints; 'balanced' class weights\n",
    "# counteract the skewed per-task label distributions. Pass the notebook\n",
    "# seed as random_state so results are reproducible (previously unseeded).\n",
    "rf_fp = RandomForestClassifier(class_weight=\"balanced\", n_estimators=500, random_state=seed)\n",
    "rf_fp.fit(train_fp, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 467,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba returns one (n_samples, 2) array per task; keep the\n",
    "# positive-class (index 1) column of each and stack them into an\n",
    "# (n_samples, n_tasks) matrix aligned with test_y.\n",
    "rf_pred_fp = rf_fp.predict_proba(test_fp)\n",
    "\n",
    "n_data, n_dim = test_y.shape\n",
    "test_y_rf_pred_fp = np.column_stack([rf_pred_fp[j][:, 1] for j in range(n_dim)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 468,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6646586257362254"
      ]
     },
     "execution_count": 468,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_scores(test_y, test_y_rf_pred_fp).mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Lorentz Embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 469,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "RandomForestClassifier(bootstrap=True, class_weight='balanced',\n",
       "                       criterion='gini', max_depth=None, max_features='auto',\n",
       "                       max_leaf_nodes=None, min_impurity_decrease=0.0,\n",
       "                       min_impurity_split=None, min_samples_leaf=1,\n",
       "                       min_samples_split=2, min_weight_fraction_leaf=0.0,\n",
       "                       n_estimators=500, n_jobs=None, oob_score=False,\n",
       "                       random_state=None, verbose=0, warm_start=False)"
      ]
     },
     "execution_count": 469,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Multi-output random forest on the Lorentz embeddings. Pass the\n",
    "# notebook seed as random_state so results are reproducible\n",
    "# (previously unseeded).\n",
    "rf_lor = RandomForestClassifier(class_weight=\"balanced\", n_estimators=500, random_state=seed)\n",
    "rf_lor.fit(train_mu, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 470,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba returns one (n_samples, 2) array per task; keep the\n",
    "# positive-class (index 1) column of each and stack them into an\n",
    "# (n_samples, n_tasks) matrix aligned with test_y.\n",
    "rf_pred_lor = rf_lor.predict_proba(test_mu)\n",
    "\n",
    "n_data, n_dim = test_y.shape\n",
    "test_y_rf_pred_lor = np.column_stack([rf_pred_lor[j][:, 1] for j in range(n_dim)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 471,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6571116170898003"
      ]
     },
     "execution_count": 471,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_scores(test_y, test_y_rf_pred_lor).mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Euclidean Embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 472,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "RandomForestClassifier(bootstrap=True, class_weight='balanced',\n",
       "                       criterion='gini', max_depth=None, max_features='auto',\n",
       "                       max_leaf_nodes=None, min_impurity_decrease=0.0,\n",
       "                       min_impurity_split=None, min_samples_leaf=1,\n",
       "                       min_samples_split=2, min_weight_fraction_leaf=0.0,\n",
       "                       n_estimators=500, n_jobs=None, oob_score=False,\n",
       "                       random_state=None, verbose=0, warm_start=False)"
      ]
     },
     "execution_count": 472,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Multi-output random forest on the Euclidean embeddings. Pass the\n",
    "# notebook seed as random_state so results are reproducible\n",
    "# (previously unseeded).\n",
    "rf_euc = RandomForestClassifier(class_weight=\"balanced\", n_estimators=500, random_state=seed)\n",
    "rf_euc.fit(train_mu_e, train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 473,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba returns one (n_samples, 2) array per task; keep the\n",
    "# positive-class (index 1) column of each and stack them into an\n",
    "# (n_samples, n_tasks) matrix aligned with test_y.\n",
    "rf_pred_euc = rf_euc.predict_proba(test_mu_e)\n",
    "\n",
    "n_data, n_dim = test_y.shape\n",
    "test_y_rf_pred_euc = np.column_stack([rf_pred_euc[j][:, 1] for j in range(n_dim)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 474,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6813400280403821"
      ]
     },
     "execution_count": 474,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_scores(test_y, test_y_rf_pred_euc).mean()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Deepchem Models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 475,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "About to initialize singletask to multitask model\n",
      "Initializing directory for task Hepatobiliary disorders\n",
      "Initializing directory for task Metabolism and nutrition disorders\n",
      "Initializing directory for task Product issues\n",
      "Initializing directory for task Eye disorders\n",
      "Initializing directory for task Investigations\n",
      "Initializing directory for task Musculoskeletal and connective tissue disorders\n",
      "Initializing directory for task Gastrointestinal disorders\n",
      "Initializing directory for task Social circumstances\n",
      "Initializing directory for task Immune system disorders\n",
      "Initializing directory for task Reproductive system and breast disorders\n",
      "Initializing directory for task Neoplasms benign, malignant and unspecified (incl cysts and polyps)\n",
      "Initializing directory for task General disorders and administration site conditions\n",
      "Initializing directory for task Endocrine disorders\n",
      "Initializing directory for task Surgical and medical procedures\n",
      "Initializing directory for task Vascular disorders\n",
      "Initializing directory for task Blood and lymphatic system disorders\n",
      "Initializing directory for task Skin and subcutaneous tissue disorders\n",
      "Initializing directory for task Congenital, familial and genetic disorders\n",
      "Initializing directory for task Infections and infestations\n",
      "Initializing directory for task Respiratory, thoracic and mediastinal disorders\n",
      "Initializing directory for task Psychiatric disorders\n",
      "Initializing directory for task Renal and urinary disorders\n",
      "Initializing directory for task Pregnancy, puerperium and perinatal conditions\n",
      "Initializing directory for task Ear and labyrinth disorders\n",
      "Initializing directory for task Cardiac disorders\n",
      "Initializing directory for task Nervous system disorders\n",
      "Initializing directory for task Injury, poisoning and procedural complications\n",
      "About to create task-specific datasets\n",
      "Splitting multitask dataset into singletask datasets\n",
      "TIMING: dataset construction took 0.039 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.004 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "TIMING: dataset construction took 0.003 s\n",
      "Loading dataset from disk.\n",
      "Processing shard 0\n",
      "\tTask Hepatobiliary disorders\n",
      "\tTask Metabolism and nutrition disorders\n",
      "\tTask Product issues\n",
      "\tTask Eye disorders\n",
      "\tTask Investigations\n",
      "\tTask Musculoskeletal and connective tissue disorders\n",
      "\tTask Gastrointestinal disorders\n",
      "\tTask Social circumstances\n",
      "\tTask Immune system disorders\n",
      "\tTask Reproductive system and breast disorders\n",
      "\tTask Neoplasms benign, malignant and unspecified (incl cysts and polyps)\n",
      "\tTask General disorders and administration site conditions\n",
      "\tTask Endocrine disorders\n",
      "\tTask Surgical and medical procedures\n",
      "\tTask Vascular disorders\n",
      "\tTask Blood and lymphatic system disorders\n",
      "\tTask Skin and subcutaneous tissue disorders\n",
      "\tTask Congenital, familial and genetic disorders\n",
      "\tTask Infections and infestations\n",
      "\tTask Respiratory, thoracic and mediastinal disorders\n",
      "\tTask Psychiatric disorders\n",
      "\tTask Renal and urinary disorders\n",
      "\tTask Pregnancy, puerperium and perinatal conditions\n",
      "\tTask Ear and labyrinth disorders\n",
      "\tTask Cardiac disorders\n",
      "\tTask Nervous system disorders\n",
      "\tTask Injury, poisoning and procedural complications\n",
      "Dataset for task Hepatobiliary disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Metabolism and nutrition disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Product issues has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Eye disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Investigations has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Musculoskeletal and connective tissue disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Gastrointestinal disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Social circumstances has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Immune system disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Reproductive system and breast disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Neoplasms benign, malignant and unspecified (incl cysts and polyps) has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task General disorders and administration site conditions has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Endocrine disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Surgical and medical procedures has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Vascular disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Blood and lymphatic system disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Skin and subcutaneous tissue disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Congenital, familial and genetic disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Infections and infestations has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Respiratory, thoracic and mediastinal disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Psychiatric disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Renal and urinary disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Pregnancy, puerperium and perinatal conditions has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Ear and labyrinth disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Cardiac disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Nervous system disorders has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Dataset for task Injury, poisoning and procedural complications has shape ((1141, 1024), (1141, 1), (1141, 1), (1141,))\n",
      "Fitting model for task Hepatobiliary disorders\n",
      "Fitting model for task Metabolism and nutrition disorders\n",
      "Fitting model for task Product issues\n",
      "Fitting model for task Eye disorders\n",
      "Fitting model for task Investigations\n",
      "Fitting model for task Musculoskeletal and connective tissue disorders\n",
      "Fitting model for task Gastrointestinal disorders\n",
      "Fitting model for task Social circumstances\n",
      "Fitting model for task Immune system disorders\n",
      "Fitting model for task Reproductive system and breast disorders\n",
      "Fitting model for task Neoplasms benign, malignant and unspecified (incl cysts and polyps)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fitting model for task General disorders and administration site conditions\n",
      "Fitting model for task Endocrine disorders\n",
      "Fitting model for task Surgical and medical procedures\n",
      "Fitting model for task Vascular disorders\n",
      "Fitting model for task Blood and lymphatic system disorders\n",
      "Fitting model for task Skin and subcutaneous tissue disorders\n",
      "Fitting model for task Congenital, familial and genetic disorders\n",
      "Fitting model for task Infections and infestations\n",
      "Fitting model for task Respiratory, thoracic and mediastinal disorders\n",
      "Fitting model for task Psychiatric disorders\n",
      "Fitting model for task Renal and urinary disorders\n",
      "Fitting model for task Pregnancy, puerperium and perinatal conditions\n",
      "Fitting model for task Ear and labyrinth disorders\n",
      "Fitting model for task Cardiac disorders\n",
      "Fitting model for task Nervous system disorders\n",
      "Fitting model for task Injury, poisoning and procedural complications\n",
      "About to evaluate model\n",
      "computed_metrics: [1.0, 1.0, 1.0, 0.9999999999999999, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.999986502537523, 1.0, 1.0, 0.9999605527128463, 1.0, 1.0, 1.0, 0.9997282808254389, 1.0, 0.999994826480144]\n",
      "computed_metrics: [0.8017207278481013, 0.6526744186046511, 0.42907801418439717, 0.7349898580121703, 0.7164058283864005, 0.6521505376344086, 0.727111111111111, 0.6344189016602809, 0.657928388746803, 0.7935294117647059, 0.6884781435509983, 0.6181102362204725, 0.7134376686454398, 0.6818181818181819, 0.684228650137741, 0.7164422684618523, 0.8233471074380165, 0.6444773175542406, 0.6541909457642313, 0.6406015037593985, 0.7458538771851188, 0.7185842513259895, 0.5963561232156274, 0.6338112305854241, 0.8129817444219067, 0.6510826771653544, 0.7085798816568047]\n",
      "computed_metrics: [0.7001194743130227, 0.6123188405797102, 0.3738095238095238, 0.712280701754386, 0.7544642857142857, 0.6755482456140351, 0.7911242603550296, 0.5778761061946902, 0.680075354609929, 0.7768067556952081, 0.6625412541254125, 0.6763565891472868, 0.6424018357980622, 0.49278438030560273, 0.7628554143980641, 0.749384236453202, 0.8018447837150127, 0.6189516129032259, 0.649793388429752, 0.623139534883721, 0.7962406015037594, 0.7064908722109533, 0.5747508305647842, 0.6690196078431372, 0.7581300813008129, 0.8308416389811738, 0.5996232269503546]\n",
      "Train scores\n",
      "{'mean-roc_auc_score': 0.9999877837983685}\n",
      "Validation scores\n",
      "{'mean-roc_auc_score': 0.6863847780318456}\n",
      "Test scores\n",
      "{'mean-roc_auc_score': 0.6766508680797827}\n"
     ]
    }
   ],
   "source": [
    "\"\"\"\n",
    "Script that trains Sklearn multitask models on the sider dataset\n",
    "@Author Bharath Ramsundar, Aneesh Pappu\n",
    "\"\"\"\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "\n",
    "metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean,\n",
    "                           mode=\"classification\")\n",
    "\n",
    "def model_builder(model_dir):\n",
    "    sklearn_model = RandomForestClassifier(class_weight=\"balanced\", n_estimators=100)\n",
    "    return dc.models.SklearnModel(sklearn_model, model_dir)\n",
    "\n",
    "model = dc.models.SingletaskToMultitask(sider_tasks, model_builder)\n",
    "\n",
    "# Fit trained model\n",
    "model.fit(train_dataset)\n",
    "model.save()\n",
    "\n",
    "print(\"About to evaluate model\")\n",
    "train_scores = model.evaluate(train_dataset, [metric], transformers)\n",
    "valid_scores = model.evaluate(valid_dataset, [metric], transformers)\n",
    "test_scores = model.evaluate(test_dataset, [metric], transformers)\n",
    "\n",
    "print(\"Train scores\")\n",
    "print(train_scores)\n",
    "\n",
    "print(\"Validation scores\")\n",
    "print(valid_scores)\n",
    "\n",
    "print(\"Test scores\")\n",
    "print(test_scores)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# seeds used for the five repeated runs: 100, 99, 98, 97, 95\n",
    "\n",
    "knn_fp_auc = [0.6489, 0.6077, 0.6403, 0.5954, 0.6458]\n",
    "knn_lor_auc = [0.6752, 0.6724, 0.6710, 0.6691, 0.6919]\n",
    "knn_euc_auc = [0.6815, 0.6887, 0.6857, 0.6804, 0.7213]\n",
    "rf_fp_auc = [0.6826, 0.6684, 0.6665, 0.6783, 0.6497]\n",
    "rf_lor_auc = [0.7123, 0.7227, 0.6968, 0.6928, 0.6983]\n",
    "rf_euc_auc = [0.7099, 0.7018, 0.6825, 0.6906, 0.6905]\n",
    "\n",
    "# no ATC information, only chemical structures\n",
    "rf_lor_chem_auc = [0.6431, 0.6387, 0.6093, 0.5854, 0.6375]\n",
    "rf_euc_chem_auc = [0.6359, 0.6140, 0.6292, 0.5955, 0.6215]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "rf_fp_arr = np.array(rf_fp_auc)\n",
    "rf_lor_arr = np.array(rf_lor_auc)\n",
    "rf_euc_arr = np.array(rf_euc_auc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "FP mean:0.6691, std:0.011406138698087077\n"
     ]
    }
   ],
   "source": [
    "print('FP mean:' + str(rf_fp_arr.mean()) + ', std:' + str(rf_fp_arr.std()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Lorentz mean:0.7045800000000001, std:0.01119524899231814\n"
     ]
    }
   ],
   "source": [
    "print('Lorentz mean:' + str(rf_lor_arr.mean()) + ', std:' + str(rf_lor_arr.std()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Euclidean mean:0.69506, std:0.009634230638717335\n"
     ]
    }
   ],
   "source": [
    "print('Euclidean mean:' + str(rf_euc_arr.mean()) + ', std:' + str(rf_euc_arr.std()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy import stats"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Paired T-test between RF+FP and RF+LDE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Ttest_relResult(statistic=-4.951889056380475, pvalue=0.0077507605194669415)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "stats.ttest_rel(rf_fp_arr,rf_lor_arr)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Paired T-test between RF+LDE and RF+EUC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Ttest_relResult(statistic=-2.6424674113529942, pvalue=0.05743200947957062)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "stats.ttest_rel(rf_euc_arr,rf_lor_arr)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "MolEnv3",
   "language": "python",
   "name": "molenv3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "238px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
