{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Implementing CLSM - Keras"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Purpose\n",
    "The purpose of this notebook is to implement Microsoft's [Convolutional Latent Semantic Model](http://www.iro.umontreal.ca/~lisa/pointeurs/ir0895-he-2.pdf) in Keras, and evaluate it on our dataset.\n",
    "\n",
    "## Inputs\n",
    "- This notebook requires *wiki-pages* from the FEVER dataset as an input."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:15:19.178373Z",
     "start_time": "2018-11-22T01:15:17.472254Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import gc\n",
    "import pickle\n",
    "from multiprocessing import Pool, cpu_count\n",
    "\n",
    "import joblib\n",
    "import keras\n",
    "import nltk\n",
    "import numpy as np\n",
    "from joblib import Parallel, delayed\n",
    "from scipy import sparse\n",
    "from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n",
    "from tqdm import tqdm_notebook\n",
    "\n",
    "import utils"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:15:19.185035Z",
     "start_time": "2018-11-22T01:15:19.180463Z"
    }
   },
   "outputs": [],
   "source": [
    "from joblib import Memory\n",
    "memory = Memory(location='/tmp', verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Preprocessing Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:15:21.723480Z",
     "start_time": "2018-11-22T01:15:19.186283Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Num Distinct Claims 109810\n",
      "Num Data Points 125050\n"
     ]
    }
   ],
   "source": [
    "claims, labels, article_list, claim_set, claim_to_article = utils.extract_fever_jsonl_data(\"../train.jsonl\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:15:32.771862Z",
     "start_time": "2018-11-22T01:15:21.729154Z"
    }
   },
   "outputs": [],
   "source": [
    "with open(\"train.pkl\", \"rb\") as f:\n",
    "    train_dict = pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:15:32.846485Z",
     "start_time": "2018-11-22T01:15:32.773556Z"
    }
   },
   "outputs": [],
   "source": [
    "for idx in range(len(train_dict)):\n",
    "    if train_dict[idx]['claim'] not in claim_set:\n",
    "        print(\"error\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-12T03:35:54.133356Z",
     "start_time": "2018-11-12T03:35:54.121855Z"
    }
   },
   "outputs": [],
   "source": [
    "with open(\"encoder.pkl\", \"wb\") as f:\n",
    "    pickle.dump(encoder, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-12T03:36:16.419492Z",
     "start_time": "2018-11-12T03:36:14.056712Z"
    }
   },
   "outputs": [],
   "source": [
    "with open(\"feature_encoder.pkl\", \"wb\") as f:\n",
    "    pickle.dump(feature_encoder, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-12T03:32:26.896181Z",
     "start_time": "2018-11-12T03:05:21.472028Z"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "be60fc748825465ba4134cf4a1fa8070",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=125051), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "34607ecef76c4a688282a53cefcb714c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=125051), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a3ab45d933ca4db5b1f7f6f682b9e938",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=3592118), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LabelEncoder()"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "processed_claims = utils.generate_all_tokens(claims)\n",
    "all_evidence = []\n",
    "\n",
    "for query in tqdm_notebook(train_dict):\n",
    "    all_evidence.extend([utils.preprocess_article_name(i) for i in query['evidence']])\n",
    "    \n",
    "processed_claims.extend(utils.generate_all_tokens(list(set(all_evidence))))\n",
    "\n",
    "possible_tokens = list(set(processed_claims))\n",
    "\n",
    "encoder = LabelEncoder()\n",
    "encoder.fit(np.array(sorted(possible_tokens)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-12T03:32:27.006809Z",
     "start_time": "2018-11-12T03:32:26.928314Z"
    }
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e537e8726221450ea2c20bbdd7ae47af",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "feature_encoder = {}\n",
    "for idx, e in tqdm_notebook(enumerate(encoder.classes_)):\n",
    "    feature_encoder[e] = idx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-09T04:59:13.851183Z",
     "start_time": "2018-11-09T04:58:21.634Z"
    }
   },
   "outputs": [],
   "source": [
    "load_processed_claims = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2018-11-09T03:20:28.473Z"
    }
   },
   "outputs": [],
   "source": [
    "claim_set = dict()\n",
    "evidence_set = dict()\n",
    "pool = Pool(processes=6)\n",
    "\n",
    "for t in tqdm_notebook(train_dict):\n",
    "    claim_set[t['claim']] = utils.tokenize_claim(t['claim'], encoder, feature_encoder)\n",
    "    evidences = t['evidence']\n",
    "    evidences = [utils.preprocess_article_name(i.split(\"http://wikipedia.org/wiki/\")[1]) for i in evidences]\n",
    "    for e in evidences:\n",
    "        if e not in evidence_set:\n",
    "            evidence_set[e] = utils.tokenize_claim(e, encoder, feature_encoder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "s = 0\n",
    "total = 0\n",
    "for i in tqdm_notebook(range(len(train_dict))):\n",
    "    articles = [utils.preprocess_article_name(j.split(\"http://wikipedia.org/wiki/\")[1]) for j in train_dict[i]['evidence']]\n",
    "    true_articles = claim_to_article[train_dict[i]['claim']]\n",
    "    total += len(true_articles)\n",
    "    if true_articles[0] not in articles:\n",
    "            s += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "articles = [utils.preprocess_article_name(i.split(\"http://wikipedia.org/wiki/\")[1]) for i in train_dict[0]['evidence']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext line_profiler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%lprun -f process_claim process_claim(5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dict[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "all_data = []\n",
    "\n",
    "article_set = set(article_list)\n",
    "\n",
    "def process_claim(idx):\n",
    "    J = 399\n",
    "    data = {}\n",
    "    articles = [utils.preprocess_article_name(i.split(\"http://wikipedia.org/wiki/\")[1]) for i in train_dict[idx]['evidence']]\n",
    "    data['claim'] = utils.tokenize_claim(train_dict[idx]['claim'], encoder)\n",
    "    true_article = claim_to_article[train_dict[idx]['claim']][0]\n",
    "    true_article_idx = articles.index(true_article)\n",
    "    data['positive_article'] = utils.tokenize_claim(true_article, encoder)\n",
    "    negative_articles = articles[:true_article_idx] + articles[true_article_idx+1:]\n",
    "    negative_articles = [utils.tokenize_claim(i, encoder) for i in negative_articles]\n",
    "    for i in range(J):\n",
    "        data['negative_article_{}'.format(i)] = negative_articles[i]\n",
    "    return data\n",
    "\n",
    "# all_data = utils.parallel_process(range(len(train_dict)), process_claim, n_jobs=12)\n",
    "\n",
    "# with open(\"all_data.pkl_lucene\", \"wb\") as f:\n",
    "#     pickle.dump(all_data, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if load_processed_claims:\n",
    "    with open(\"all_data.pkl\", \"rb\") as f:\n",
    "        all_data = pickle.load(f)\n",
    "else:\n",
    "    all_data = []\n",
    "\n",
    "    article_set = set(article_list)\n",
    "    \n",
    "    def process_claim(idx):\n",
    "        J = 4\n",
    "        data = {}\n",
    "        data['claim'] = utils.tokenize_claim(claims[idx], encoder)\n",
    "        data['positive_article'] = utils.tokenize_claim(article_list[idx], encoder)\n",
    "        negative_articles = np.random.choice(list(article_set - set(claim_to_article[claims[idx]])), J)\n",
    "        negative_articles = [utils.tokenize_claim(i, encoder) for i in negative_articles]\n",
    "        for i in range(J):\n",
    "            data['negative_article_{}'.format(i)] = negative_articles[i]\n",
    "        return data\n",
    "\n",
    "    all_data = utils.parallel_process(range(len(claims)), process_claim, n_jobs=6)\n",
    "    \n",
    "    with open(\"all_data.pkl\", \"wb\") as f:\n",
    "        pickle.dump(all_data, f)\n",
    "    #all_data = Parallel(n_jobs=cpu_count(), verbose=1, prefer=\"threads\")(delayed(process_claim)(i) for i in range(len(claims)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Beginning the Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:15:54.459038Z",
     "start_time": "2018-11-22T01:15:52.891536Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[name: \"/device:CPU:0\"\n",
      "device_type: \"CPU\"\n",
      "memory_limit: 268435456\n",
      "locality {\n",
      "}\n",
      "incarnation: 5845569699656758029\n",
      ", name: \"/device:GPU:0\"\n",
      "device_type: \"GPU\"\n",
      "memory_limit: 11981596263\n",
      "locality {\n",
      "  bus_id: 1\n",
      "  links {\n",
      "    link {\n",
      "      device_id: 1\n",
      "      type: \"StreamExecutor\"\n",
      "      strength: 1\n",
      "    }\n",
      "  }\n",
      "}\n",
      "incarnation: 3010196439425632895\n",
      "physical_device_desc: \"device: 0, name: GeForce GTX TITAN X, pci bus id: 0000:02:00.0, compute capability: 5.2\"\n",
      ", name: \"/device:GPU:1\"\n",
      "device_type: \"GPU\"\n",
      "memory_limit: 10056728576\n",
      "locality {\n",
      "  bus_id: 1\n",
      "  links {\n",
      "    link {\n",
      "      type: \"StreamExecutor\"\n",
      "      strength: 1\n",
      "    }\n",
      "  }\n",
      "}\n",
      "incarnation: 1431079868980050082\n",
      "physical_device_desc: \"device: 1, name: GeForce GTX TITAN X, pci bus id: 0000:03:00.0, compute capability: 5.2\"\n",
      "]\n"
     ]
    }
   ],
   "source": [
    "from tensorflow.python.client import device_lib\n",
    "print(device_lib.list_local_devices())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:16:14.791471Z",
     "start_time": "2018-11-22T01:16:14.766304Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%run deep_semantic_similarity_keras.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:16:25.069499Z",
     "start_time": "2018-11-22T01:16:24.845305Z"
    }
   },
   "outputs": [],
   "source": [
    "model = create_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:16:15.750868Z",
     "start_time": "2018-11-22T01:16:15.745642Z"
    }
   },
   "outputs": [],
   "source": [
    "from scipy import sparse\n",
    "from matplotlib import pyplot as plt\n",
    "import numpy as np\n",
    "from keras.utils import multi_gpu_model\n",
    "import keras\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:16:45.765111Z",
     "start_time": "2018-11-22T01:16:45.760908Z"
    }
   },
   "outputs": [],
   "source": [
    "load_processed_claims = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:17:02.481890Z",
     "start_time": "2018-11-22T01:16:46.343642Z"
    }
   },
   "outputs": [],
   "source": [
    "if load_processed_claims:\n",
    "    with open(\"saved_data.pkl\", \"rb\") as f:\n",
    "        data = pickle.load(f)\n",
    "else:\n",
    "    data = {\"claim\":[], \"positive_article\":[], \"negative_article_0\":[], \"negative_article_1\":[], \\\n",
    "            \"negative_article_2\":[], \"negative_article_3\":[]}\n",
    "\n",
    "    for d in tqdm_notebook(all_data):\n",
    "        data['claim'].append(scipy.sparse.vstack(d['claim']))\n",
    "        data['positive_article'].append(scipy.sparse.vstack(d['positive_article']))\n",
    "        data['negative_article_0'].append(scipy.sparse.vstack(d['negative_article_0']))\n",
    "        data['negative_article_1'].append(scipy.sparse.vstack(d['negative_article_1']))\n",
    "        data['negative_article_2'].append(scipy.sparse.vstack(d['negative_article_2']))\n",
    "        data['negative_article_3'].append(scipy.sparse.vstack(d['negative_article_3']))\n",
    "\n",
    "    with open(\"saved_data.pkl\", \"wb\") as f:\n",
    "        pickle.dump(data, f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Next, we train the model in batches."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-09T07:52:01.838601Z",
     "start_time": "2018-11-09T07:52:01.752789Z"
    }
   },
   "outputs": [],
   "source": [
    "y = np.zeros((1, J+1))\n",
    "y[:,0] = 1\n",
    "y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:17:16.170629Z",
     "start_time": "2018-11-22T01:17:16.161797Z"
    }
   },
   "outputs": [],
   "source": [
    "def stack_uneven(arrays, fill_value=0.):\n",
    "        '''\n",
    "        Fits arrays into a single numpy array, even if they are\n",
    "        different sizes. `fill_value` is the default value.\n",
    "\n",
    "        Args:\n",
    "                arrays: list of np arrays of various sizes\n",
    "                    (must be same rank, but not necessarily same size)\n",
    "                fill_value (float, optional):\n",
    "\n",
    "        Returns:\n",
    "                np.ndarray\n",
    "        '''\n",
    "        sizes = [a.shape for a in arrays]\n",
    "        max_sizes = np.max(list(zip(*sizes)), -1)\n",
    "        # The resultant array has stacked on the first dimension\n",
    "        result = np.full((len(arrays),) + tuple(max_sizes), fill_value)\n",
    "        for i, a in enumerate(arrays):\n",
    "          # The shape of this array `a`, turned into slices\n",
    "          slices = tuple(slice(0,s) for s in sizes[i])\n",
    "          # Overwrite a block slice of `result` with this array `a`\n",
    "          result[i][slices] = a\n",
    "        return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-17T23:05:57.449263Z",
     "start_time": "2018-11-17T23:05:57.312427Z"
    }
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'all_data' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-10-d69547a8760b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mall_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m     \u001b[0mto_stack\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m         \u001b[0mto_stack\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtodense\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mall_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mNameError\u001b[0m: name 'all_data' is not defined"
     ]
    }
   ],
   "source": [
    "for k, v in all_data[0].items():\n",
    "    to_stack = []\n",
    "    for i in [0]:\n",
    "        to_stack.append(np.vstack([j.todense() for j in all_data[i][k]]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:17:39.753543Z",
     "start_time": "2018-11-22T01:17:20.654356Z"
    }
   },
   "outputs": [],
   "source": [
    "all_data = joblib.load(\"all_data_lucene_pt_12.pkl\", \"rb\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:18:03.889856Z",
     "start_time": "2018-11-22T01:18:03.881311Z"
    }
   },
   "outputs": [],
   "source": [
    "class DataGenerator(keras.utils.Sequence):\n",
    "    \"\"\"\n",
    "    Generates data with batch size of 1 sample for the purposes of training our model.\n",
    "    \"\"\"\n",
    "    def __init__(self, data, J, batch_size=32, split=None):\n",
    "        \"\"\"\n",
    "            Sets the initial arguments and creates\n",
    "            an indicies array to randomize the dataset\n",
    "            between epochs\n",
    "        \"\"\"\n",
    "        if split:            \n",
    "            self.indicies = split\n",
    "        else:\n",
    "            self.indicies = list(range(len(data)))\n",
    "        self.data = data\n",
    "        self.J = J\n",
    "        self.batch_size = batch_size\n",
    "        \n",
    "    def __len__(self):\n",
    "        return int(np.floor(len(self.indicies) / self.batch_size))\n",
    "    \n",
    "    def __getitem__(self, index):\n",
    "        return self.get_item(index)\n",
    "    \n",
    "    def get_item(self, index):            \n",
    "            \n",
    "        final = {}\n",
    "        #idx = self.indicies[index*self.batch_size:(index+1)*self.batch_size]  # help randomly shuffle the dataset\n",
    "        idx = self.indicies[index]\n",
    "        for k in self.data[0].keys():\n",
    "            final[k] = np.expand_dims(sparse.vstack(self.data[idx][k]).todense(),0)\n",
    "            #print(\"Stacking array {}\".format(k))\n",
    "            \n",
    "#             arrays = np.array(arrays)\n",
    "#             lens = np.array([len(i) for i in arrays])\n",
    "\n",
    "#             # Mask of valid places in each row\n",
    "#             mask = np.arange(lens.max()) < lens[:,None]\n",
    "\n",
    "#             # Setup output array and put elements from data into masked positions\n",
    "#             out = np.zeros(mask.shape, dtype=arrays.dtype)\n",
    "#             out[mask] = np.vstack(arrays)\n",
    "        \n",
    "            #final[k] = np.array(arrays)\n",
    "            \n",
    "        y = np.zeros((self.batch_size, self.J+1))\n",
    "        y[:,0] = 1\n",
    "\n",
    "        return final, y\n",
    "    \n",
    "    def on_epoch_end(self):\n",
    "        #np.random.shuffle(self.indicies)\n",
    "        pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:18:05.243981Z",
     "start_time": "2018-11-22T01:18:05.237663Z"
    }
   },
   "outputs": [],
   "source": [
    "generator = DataGenerator(all_data, 399, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:19:03.151628Z",
     "start_time": "2018-11-22T01:19:00.132584Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "367 ms ± 2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
     ]
    }
   ],
   "source": [
    "%timeit d = generator.get_item(6)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "d[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:20:53.856227Z",
     "start_time": "2018-11-22T01:20:53.850845Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_keys(['negative_article_10', 'negative_article_173', 'negative_article_263', 'negative_article_297', 'negative_article_367', 'negative_article_92', 'negative_article_384', 'negative_article_272', 'negative_article_323', 'negative_article_204', 'negative_article_386', 'negative_article_220', 'negative_article_139', 'negative_article_225', 'negative_article_82', 'negative_article_394', 'negative_article_368', 'negative_article_61', 'negative_article_142', 'negative_article_363', 'negative_article_86', 'negative_article_266', 'negative_article_308', 'negative_article_31', 'negative_article_53', 'negative_article_154', 'negative_article_213', 'negative_article_314', 'negative_article_325', 'negative_article_131', 'negative_article_205', 'negative_article_212', 'negative_article_125', 'negative_article_378', 'negative_article_98', 'negative_article_340', 'negative_article_298', 'negative_article_289', 'negative_article_79', 'negative_article_81', 'negative_article_152', 'negative_article_245', 'negative_article_382', 'negative_article_211', 'negative_article_305', 'negative_article_88', 'negative_article_359', 'negative_article_155', 'negative_article_8', 'negative_article_12', 'negative_article_365', 'negative_article_149', 'negative_article_249', 'negative_article_46', 'negative_article_95', 'negative_article_51', 'negative_article_357', 'negative_article_148', 'negative_article_84', 'negative_article_78', 'negative_article_276', 'negative_article_267', 'negative_article_341', 'negative_article_320', 'negative_article_144', 'negative_article_39', 'negative_article_328', 'negative_article_21', 'negative_article_362', 'negative_article_300', 'negative_article_183', 'negative_article_375', 'negative_article_143', 'negative_article_141', 'negative_article_339', 'negative_article_281', 'negative_article_123', 'negative_article_327', 'negative_article_270', 'negative_article_317', 'negative_article_397', 'negative_article_34', 'negative_article_176', 
'negative_article_354', 'negative_article_348', 'negative_article_361', 'negative_article_198', 'negative_article_290', 'negative_article_230', 'negative_article_54', 'negative_article_40', 'negative_article_262', 'negative_article_109', 'negative_article_369', 'negative_article_206', 'negative_article_137', 'negative_article_366', 'negative_article_32', 'negative_article_93', 'claim', 'negative_article_103', 'negative_article_385', 'negative_article_184', 'negative_article_326', 'negative_article_374', 'negative_article_322', 'negative_article_398', 'negative_article_265', 'negative_article_157', 'negative_article_15', 'negative_article_255', 'negative_article_229', 'negative_article_227', 'negative_article_122', 'negative_article_134', 'negative_article_318', 'negative_article_364', 'negative_article_175', 'negative_article_334', 'negative_article_5', 'negative_article_288', 'negative_article_232', 'negative_article_395', 'negative_article_28', 'negative_article_237', 'negative_article_179', 'negative_article_333', 'negative_article_190', 'negative_article_106', 'negative_article_280', 'negative_article_185', 'negative_article_294', 'negative_article_156', 'negative_article_16', 'negative_article_310', 'negative_article_101', 'negative_article_33', 'negative_article_241', 'negative_article_52', 'negative_article_116', 'negative_article_158', 'negative_article_356', 'negative_article_393', 'negative_article_167', 'negative_article_36', 'negative_article_219', 'negative_article_153', 'negative_article_332', 'negative_article_71', 'negative_article_199', 'negative_article_388', 'negative_article_261', 'negative_article_4', 'negative_article_338', 'negative_article_299', 'negative_article_178', 'negative_article_194', 'negative_article_224', 'negative_article_321', 'negative_article_358', 'negative_article_286', 'negative_article_43', 'negative_article_47', 'negative_article_373', 'negative_article_80', 'negative_article_188', 'negative_article_118', 
'negative_article_22', 'negative_article_186', 'negative_article_17', 'negative_article_302', 'negative_article_189', 'negative_article_138', 'negative_article_65', 'negative_article_370', 'negative_article_111', 'negative_article_247', 'negative_article_181', 'negative_article_90', 'negative_article_2', 'negative_article_68', 'negative_article_240', 'negative_article_222', 'negative_article_151', 'negative_article_307', 'negative_article_301', 'negative_article_379', 'negative_article_146', 'negative_article_75', 'negative_article_311', 'negative_article_233', 'negative_article_257', 'negative_article_41', 'negative_article_275', 'negative_article_35', 'negative_article_105', 'negative_article_200', 'negative_article_163', 'negative_article_160', 'negative_article_337', 'negative_article_120', 'negative_article_7', 'negative_article_18', 'negative_article_228', 'negative_article_207', 'negative_article_238', 'negative_article_77', 'negative_article_389', 'negative_article_104', 'negative_article_50', 'negative_article_29', 'negative_article_216', 'negative_article_259', 'negative_article_353', 'negative_article_383', 'negative_article_244', 'negative_article_391', 'negative_article_117', 'negative_article_292', 'negative_article_345', 'negative_article_66', 'negative_article_73', 'negative_article_215', 'negative_article_60', 'negative_article_196', 'negative_article_69', 'negative_article_100', 'negative_article_132', 'negative_article_208', 'negative_article_126', 'negative_article_70', 'negative_article_329', 'negative_article_48', 'negative_article_202', 'negative_article_62', 'negative_article_150', 'negative_article_64', 'negative_article_352', 'negative_article_392', 'negative_article_25', 'negative_article_1', 'negative_article_182', 'negative_article_169', 'negative_article_191', 'negative_article_342', 'negative_article_377', 'negative_article_214', 'negative_article_24', 'negative_article_380', 'negative_article_23', 'negative_article_260', 
'negative_article_58', 'negative_article_6', 'negative_article_250', 'negative_article_187', 'negative_article_170', 'negative_article_324', 'negative_article_351', 'negative_article_147', 'negative_article_268', 'negative_article_174', 'negative_article_221', 'negative_article_312', 'negative_article_203', 'negative_article_309', 'negative_article_355', 'negative_article_343', 'negative_article_168', 'negative_article_331', 'negative_article_164', 'negative_article_161', 'negative_article_264', 'negative_article_304', 'negative_article_274', 'negative_article_236', 'negative_article_102', 'negative_article_159', 'negative_article_44', 'negative_article_306', 'negative_article_256', 'negative_article_63', 'negative_article_248', 'negative_article_396', 'negative_article_55', 'negative_article_72', 'negative_article_251', 'negative_article_56', 'negative_article_26', 'negative_article_239', 'negative_article_376', 'negative_article_293', 'negative_article_74', 'negative_article_9', 'negative_article_57', 'negative_article_381', 'negative_article_121', 'negative_article_30', 'negative_article_91', 'negative_article_330', 'negative_article_11', 'negative_article_372', 'negative_article_37', 'negative_article_85', 'negative_article_96', 'negative_article_27', 'negative_article_217', 'negative_article_282', 'negative_article_13', 'negative_article_192', 'negative_article_258', 'negative_article_278', 'negative_article_315', 'positive_article', 'negative_article_119', 'negative_article_347', 'negative_article_83', 'negative_article_94', 'negative_article_303', 'negative_article_197', 'negative_article_346', 'negative_article_223', 'negative_article_284', 'negative_article_67', 'negative_article_99', 'negative_article_243', 'negative_article_20', 'negative_article_130', 'negative_article_283', 'negative_article_177', 'negative_article_87', 'negative_article_89', 'negative_article_172', 'negative_article_180', 'negative_article_218', 'negative_article_335', 
'negative_article_210', 'negative_article_171', 'negative_article_38', 'negative_article_140', 'negative_article_145', 'negative_article_135', 'negative_article_14', 'negative_article_59', 'negative_article_76', 'negative_article_162', 'negative_article_97', 'negative_article_291', 'negative_article_165', 'negative_article_273', 'negative_article_271', 'negative_article_193', 'negative_article_42', 'negative_article_279', 'negative_article_295', 'negative_article_242', 'negative_article_49', 'negative_article_269', 'negative_article_371', 'negative_article_114', 'negative_article_115', 'negative_article_252', 'negative_article_45', 'negative_article_336', 'negative_article_349', 'negative_article_0', 'negative_article_107', 'negative_article_127', 'negative_article_360', 'negative_article_296', 'negative_article_231', 'negative_article_226', 'negative_article_390', 'negative_article_253', 'negative_article_319', 'negative_article_124', 'negative_article_133', 'negative_article_387', 'negative_article_110', 'negative_article_166', 'negative_article_246', 'negative_article_254', 'negative_article_128', 'negative_article_235', 'negative_article_209', 'negative_article_285', 'negative_article_19', 'negative_article_313', 'negative_article_112', 'negative_article_3', 'negative_article_344', 'negative_article_350', 'negative_article_108', 'negative_article_287', 'negative_article_277', 'negative_article_129', 'negative_article_201', 'negative_article_195', 'negative_article_136', 'negative_article_113', 'negative_article_234', 'negative_article_316'])"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "d[0].keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:20:46.570476Z",
     "start_time": "2018-11-22T01:20:46.551800Z"
    }
   },
   "outputs": [
    {
     "ename": "KeyError",
     "evalue": "'documents'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-28-9878fdd510d1>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0md\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'documents'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mKeyError\u001b[0m: 'documents'"
     ]
    }
   ],
   "source": [
    "d[0]['documents'].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-22T01:18:18.788367Z",
     "start_time": "2018-11-22T01:18:17.396023Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import gc\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-09T07:43:07.822224Z",
     "start_time": "2018-11-09T07:43:07.814973Z"
    }
   },
   "outputs": [],
   "source": [
    "idxs_to_remove"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-09T07:52:55.198309Z",
     "start_time": "2018-11-09T07:52:55.191558Z"
    }
   },
   "outputs": [],
   "source": [
    "idxs_to_remove = []\n",
    "for idx, e in enumerate(all_data):\n",
    "    if type(e)!=dict:\n",
    "        idxs_to_remove.append(idx)\n",
    "\n",
    "for e in idxs_to_remove[::-1]:\n",
    "    all_data.pop(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "parallel_model = multi_gpu_model(model, gpus=2)\n",
    "parallel_model.compile(loss='categorical_crossentropy',\n",
    "                       optimizer='adadelta')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-11-09T14:26:47.976849Z",
     "start_time": "2018-11-09T14:26:47.927630Z"
    }
   },
   "outputs": [],
   "source": [
    "model.compile(loss=\"categorical_crossentropy\", optimizer=\"adadelta\", metrics=['accuracy'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reset_weights(model):\n",
    "    session = backend.get_session()\n",
    "    for layer in model.layers: \n",
    "        if hasattr(layer, 'kernel_initializer'):\n",
    "            layer.kernel.initializer.run(session=session)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "reset_weights(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2018-11-09T14:34:44.385Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "model.fit_generator(generator=generator, epochs=20, use_multiprocessing=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "validation = DataGenerator(data, J, split=range(90000, 125000))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.evaluate_generator(generator=validation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for i in tqdm_notebook(range(len(data['claim']))):\n",
    "    batch = {\"claim\":[], \"positive_article\":[], \"negative_article_0\":[], \"negative_article_1\":[], \\\n",
    "        \"negative_article_2\":[], \"negative_article_3\":[]}\n",
    "    batch['claim'] = np.expand_dims(data['claim'][i].todense(), 0)\n",
    "    batch['positive_article'] = np.expand_dims(data['positive_article'][i].todense(), 0)\n",
    "    batch['negative_article_0'] = np.expand_dims(data['negative_article_0'][i].todense(), 0)\n",
    "    batch['negative_article_1'] = np.expand_dims(data['negative_article_1'][i].todense(), 0)\n",
    "    batch['negative_article_2'] = np.expand_dims(data['negative_article_2'][i].todense(), 0)\n",
    "    batch['negative_article_3'] = np.expand_dims(data['negative_article_3'][i].todense(), 0)\n",
    "    model.fit(batch, y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.fit(data, y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Precompute the full article pool once so negative sampling below is cheap.\n",
     "article_set = set(article_list)\n",
     "\n",
     "def process_claim(idx):\n",
     "    \"\"\"Build one training example for claim `idx`.\n",
     "\n",
     "    Returns a dict with the tokenized claim, its positive (gold) article,\n",
     "    and J negative articles sampled from articles NOT linked to the claim.\n",
     "\n",
     "    NOTE(review): np.random.choice samples WITH replacement by default, so\n",
     "    the same negative article can be drawn more than once -- confirm that\n",
     "    this is intended.\n",
     "    \"\"\"\n",
     "    data = {}\n",
     "    data['claim'] = utils.tokenize_claim(claims[idx], encoder)\n",
     "    data['positive_article'] = utils.tokenize_claim(article_list[idx], encoder)\n",
     "    # Candidate negatives = every article except those mapped to this claim.\n",
     "    negative_articles = np.random.choice(list(article_set - set(claim_to_article[claims[idx]])), J)\n",
     "    negative_articles = [utils.tokenize_claim(i, encoder) for i in negative_articles]\n",
     "    data['negative_article'] = negative_articles\n",
     "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "process_claim(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.argwhere(all_data[0]['claim'][0]==0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.inputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.fit()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
