{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from __future__ import print_function\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import os\n",
    "import time\n",
    "import datetime\n",
    "from scnn_model import SCNN_MODEL\n",
    "from tensorflow.contrib import learn\n",
    "from six.moves import cPickle as pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load vocabulary and the word2vec model\n",
    "# NOTE: pickle.load is only safe on trusted files — these are our own saves\n",
    "pickle_file = 'save.pickle'\n",
    "with open(pickle_file, 'rb') as f:\n",
    "    embedding_save = pickle.load(f)\n",
    "    wordsVectors = embedding_save['wordsVectors']\n",
    "    vocabulary = embedding_save['vocabulary']\n",
    "    del embedding_save  # drop the temporary dict so gc can reclaim it\n",
    "print('Vocabulary and the word2vec loaded')\n",
    "print('Vocabulary size is ', len(vocabulary))\n",
    "print('Word2Vec model shape is ', wordsVectors.shape)\n",
    "\n",
    "#Load training data, training labels, validation data, validation labels\n",
    "pickle_file = 'data_saved.pickle'\n",
    "with open(pickle_file, 'rb') as f:\n",
    "    dataset_save = pickle.load(f)\n",
    "    train_data = dataset_save['train_data']\n",
    "    train_labels = dataset_save['train_labels']\n",
    "    validation_data = dataset_save['validation_data']\n",
    "    validation_labels = dataset_save['validation_labels']\n",
    "    del dataset_save  # drop the temporary dict so gc can reclaim it\n",
    "print('train data shape ', train_data.shape)\n",
    "print('train labels shape ', train_labels.shape)\n",
    "print('validation data shape ', validation_data.shape)\n",
    "print('validation labels shape ', validation_labels.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def batch_iter(data, batch_size, num_epochs, shuffle=True):\n",
    "    \"\"\"\n",
    "    Yield successive mini-batches over `data` for `num_epochs` epochs.\n",
    "    Original taken from https://github.com/dennybritz/cnn-text-classification-tf/blob/master/data_helpers.py\n",
    "    \"\"\"\n",
    "    data = np.array(data)\n",
    "    n_examples = len(data)\n",
    "    # Ceiling division: the final batch of an epoch may hold fewer examples\n",
    "    batches_per_epoch = int((n_examples - 1) / batch_size) + 1\n",
    "    for _ in range(num_epochs):\n",
    "        # Re-shuffle at every epoch so the batch composition changes across epochs\n",
    "        if shuffle:\n",
    "            epoch_data = data[np.random.permutation(n_examples)]\n",
    "        else:\n",
    "            epoch_data = data\n",
    "        for batch_idx in range(batches_per_epoch):\n",
    "            start = batch_idx * batch_size\n",
    "            yield epoch_data[start:min(start + batch_size, n_examples)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Model Hyperparameters\n",
    "SENTENCE_PER_REVIEW = 16       # Sentences kept per review (model input height)\n",
    "WORDS_PER_SENTENCE = 10        # Words kept per sentence (model input width)\n",
    "EMBEDDING_DIM = 300            # Dimensionality of the word2vec vectors\n",
    "FILTER_WIDTHS_SENT_CONV = np.array([3, 4, 5])\n",
    "NUM_FILTERS_SENT_CONV = 100\n",
    "FILTER_WIDTHS_DOC_CONV = np.array([3, 4, 5])\n",
    "NUM_FILTERS_DOC_CONV = 100\n",
    "NUM_CLASSES = 2\n",
    "DROPOUT_KEEP_PROB = 0.5        # Keep-probability used while training (1.0 at eval)\n",
    "L2_REG_LAMBDA = 0.0\n",
    "BATCH_SIZE = 64\n",
    "NUM_EPOCHS = 100\n",
    "EVALUATE_EVERY = 100   # Evaluate model on the validation set every 100 steps\n",
    "CHECKPOINT_EVERY = 100 # Save model after each 100 steps\n",
    "NUM_CHECKPOINTS = 5    # Keep only the 5 most recent checkpoints\n",
    "LEARNING_RATE = 1e-3   # The learning rate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''Training the model'''\n",
    "# Builds the SCNN graph, trains it with Adam, logs TensorBoard summaries,\n",
    "# evaluates on the validation set and checkpoints every CHECKPOINT_EVERY steps.\n",
    "with tf.Graph().as_default():\n",
    "    # NOTE(review): the Session is never explicitly closed; it lives for the\n",
    "    # kernel's lifetime, which is acceptable in a notebook but leaky in a script.\n",
    "    sess = tf.Session()\n",
    "    with sess.as_default():\n",
    "        cnn = SCNN_MODEL(sentence_per_review=SENTENCE_PER_REVIEW, \n",
    "                        words_per_sentence=WORDS_PER_SENTENCE, \n",
    "                        wordVectors=wordsVectors, \n",
    "                        embedding_size=EMBEDDING_DIM, \n",
    "                        filter_widths_sent_conv=FILTER_WIDTHS_SENT_CONV, \n",
    "                        num_filters_sent_conv=NUM_FILTERS_SENT_CONV, \n",
    "                        filter_widths_doc_conv=FILTER_WIDTHS_DOC_CONV, \n",
    "                        num_filters_doc_conv=NUM_FILTERS_DOC_CONV, \n",
    "                        num_classes=NUM_CLASSES, \n",
    "                        l2_reg_lambda=L2_REG_LAMBDA)\n",
    "        \n",
    "        # Define Training procedure\n",
    "        global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n",
    "        optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)\n",
    "        # compute_gradients/apply_gradients (rather than minimize) keeps\n",
    "        # grads_and_vars available should gradient summaries be added later\n",
    "        grads_and_vars = optimizer.compute_gradients(cnn.loss)\n",
    "        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n",
    "        \n",
    "        # Output directory for models and summaries\n",
    "        timestamp = str(int(time.time()))\n",
    "        out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n",
    "        print(\"Writing to {}\\n\".format(out_dir))\n",
    "        \n",
    "        # Summaries for loss and accuracy\n",
    "        loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n",
    "        acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n",
    "        \n",
    "        # Train Summaries\n",
    "        train_summary_op = tf.summary.merge([loss_summary, acc_summary])\n",
    "        train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n",
    "        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n",
    "        \n",
    "        # Dev summaries\n",
    "        dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n",
    "        dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n",
    "        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n",
    "        \n",
    "        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n",
    "        checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n",
    "        checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n",
    "        if not os.path.exists(checkpoint_dir):\n",
    "            os.makedirs(checkpoint_dir)\n",
    "        saver = tf.train.Saver(tf.global_variables(), max_to_keep=NUM_CHECKPOINTS)\n",
    "        \n",
    "        # Initialize all variables\n",
    "        sess.run(tf.global_variables_initializer())\n",
    "        \n",
    "        def train_step(x_batch, y_batch):\n",
    "            \"\"\"\n",
    "            A single training step\n",
    "            \"\"\"\n",
    "            # x_batch / y_batch arrive as tuples produced by zip(*batch) below\n",
    "            feed_dict = {\n",
    "                cnn.input_x: x_batch,\n",
    "                cnn.input_y: y_batch,\n",
    "                cnn.input_size: len(y_batch),\n",
    "                # dropout keep-probability: DROPOUT_KEEP_PROB (< 1.0) only while training\n",
    "                cnn.dropout: DROPOUT_KEEP_PROB\n",
    "            }\n",
    "            _, step, summaries, loss, accuracy = sess.run(\n",
    "                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n",
    "                feed_dict)\n",
    "            time_str = datetime.datetime.now().isoformat()\n",
    "            print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n",
    "            train_summary_writer.add_summary(summaries, step)\n",
    "        \n",
    "        \n",
    "        def dev_step(x_batch, y_batch, writer=None):\n",
    "            \"\"\"\n",
    "            Evaluates model on a dev set\n",
    "            \"\"\"\n",
    "            # The whole dev set is fed as one batch — assumes it fits in memory\n",
    "            feed_dict = {\n",
    "                cnn.input_x: x_batch,\n",
    "                cnn.input_y: y_batch,\n",
    "                cnn.input_size: y_batch.shape[0],\n",
    "                # keep-probability 1.0 disables dropout at evaluation time\n",
    "                cnn.dropout: 1.0\n",
    "            }\n",
    "            # No train_op in the fetches: evaluation must not update weights\n",
    "            step, summaries, loss, accuracy = sess.run(\n",
    "                [global_step, dev_summary_op, cnn.loss, cnn.accuracy],\n",
    "                feed_dict)\n",
    "            time_str = datetime.datetime.now().isoformat()\n",
    "            print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n",
    "            if writer:\n",
    "                writer.add_summary(summaries, step)\n",
    "        \n",
    "        print('train data shape ', train_data.shape)\n",
    "        print('train labels shape ', train_labels.shape)\n",
    "        print('validation data shape ', validation_data.shape)\n",
    "        print('validation labels shape ', validation_labels.shape)\n",
    "        \n",
    "        # Generate batches\n",
    "        batches = batch_iter(\n",
    "            list(zip(train_data, train_labels)), BATCH_SIZE, NUM_EPOCHS)\n",
    "        # Training loop. For each batch...\n",
    "        for batch in batches:\n",
    "            x_batch, y_batch = zip(*batch)\n",
    "            train_step(x_batch, y_batch)\n",
    "            # Read back the step counter that apply_gradients incremented\n",
    "            current_step = tf.train.global_step(sess, global_step)\n",
    "            if current_step % EVALUATE_EVERY == 0:\n",
    "                print(\"\\nEvaluation:\")\n",
    "                dev_step(validation_data, validation_labels, writer=dev_summary_writer)\n",
    "                print(\"\")\n",
    "            if current_step % CHECKPOINT_EVERY == 0:\n",
    "                path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n",
    "                print(\"Saved model checkpoint to {}\\n\".format(path))\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
