{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "/usr/local/lib/python3.5/dist-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n",
      "  \"This module will be removed in 0.20.\", DeprecationWarning)\n"
     ]
    }
   ],
   "source": [
    "from utils import *\n",
    "import tensorflow as tf\n",
    "from sklearn.cross_validation import train_test_split\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['negative', 'positive']\n",
      "10662\n",
      "10662\n"
     ]
    }
   ],
   "source": [
    "# Load the labeled corpus: load_files expects one sub-directory per class under 'data'\n",
    "# (target_names printed below shows ['negative', 'positive']).\n",
    "trainset = sklearn.datasets.load_files(container_path = 'data', encoding = 'UTF-8')\n",
    "# separate_dataset comes from utils (imported with *); presumably it cleans/tokenizes\n",
    "# the text and 1.0 means \"keep the full dataset\" -- TODO confirm against utils.py.\n",
    "trainset.data, trainset.target = separate_dataset(trainset,1.0)\n",
    "print (trainset.target_names)\n",
    "print (len(trainset.data))\n",
    "print (len(trainset.target))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a dense one-hot label matrix, then carve out a held-out 20% test split.\n",
    "n_samples = len(trainset.data)\n",
    "n_classes = len(trainset.target_names)\n",
    "ONEHOT = np.zeros((n_samples, n_classes))\n",
    "ONEHOT[np.arange(n_samples), trainset.target] = 1.0\n",
    "# Splitting texts, integer labels and one-hot labels together keeps them aligned.\n",
    "train_X, test_X, train_Y, test_Y, train_onehot, test_onehot = train_test_split(\n",
    "    trainset.data, trainset.target, ONEHOT, test_size = 0.2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "vocab from size: 20465\n",
      "Most common words [('the', 10129), ('a', 7312), ('and', 6199), ('of', 6063), ('to', 4233), ('is', 3378)]\n",
      "Sample data [4, 657, 9, 2836, 8, 22, 4, 3424, 17440, 97] ['the', 'rock', 'is', 'destined', 'to', 'be', 'the', '21st', 'centurys', 'new']\n"
     ]
    }
   ],
   "source": [
    "# Build the word-id vocabulary over the whole corpus; build_dataset comes from utils.\n",
    "concat = ' '.join(trainset.data).split()\n",
    "vocabulary_size = len(list(set(concat)))\n",
    "data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)\n",
    "print('vocab from size: %d'%(vocabulary_size))\n",
    "# count[0:4] presumably holds the special tokens (GO/PAD/EOS/UNK), hence the\n",
    "# slice starting at 4 -- TODO confirm in utils.build_dataset.\n",
    "print('Most common words', count[4:10])\n",
    "print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Resolve the vocabulary ids of the four special tokens in one pass.\n",
    "GO, PAD, EOS, UNK = (dictionary[token] for token in ('GO', 'PAD', 'EOS', 'UNK'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NTMCell():\n",
    "    '''Neural Turing Machine cell (Graves et al., 2014, arXiv:1410.5401).\n",
    "\n",
    "    A BasicRNNCell controller reads from and writes to an external memory\n",
    "    matrix of shape [memory_size, memory_vector_dim] through read/write\n",
    "    heads addressed by content similarity and, optionally, location shifts.\n",
    "    '''\n",
    "    def __init__(self, rnn_size, memory_size, memory_vector_dim, read_head_num, write_head_num,\n",
    "                 addressing_mode='content_and_location', shift_range=1, reuse=False, output_dim=None):\n",
    "        self.rnn_size = rnn_size                    # controller hidden size\n",
    "        self.memory_size = memory_size              # number of memory slots\n",
    "        self.memory_vector_dim = memory_vector_dim  # width of each memory slot\n",
    "        self.read_head_num = read_head_num\n",
    "        self.write_head_num = write_head_num\n",
    "        self.addressing_mode = addressing_mode\n",
    "        self.reuse = reuse\n",
    "        self.controller = tf.nn.rnn_cell.BasicRNNCell(self.rnn_size)\n",
    "        self.step = 0            # > 0 means the o2p/o2o variables already exist and must be reused\n",
    "        self.output_dim = output_dim\n",
    "        self.shift_range = shift_range\n",
    "    \n",
    "    def __call__(self, x, prev_state):\n",
    "        '''Run one NTM step on input batch x; returns (NTM_output, new_state).'''\n",
    "        prev_read_vector_list = prev_state['read_vector_list']\n",
    "        prev_controller_state = prev_state['controller_state']\n",
    "        # The controller sees the input concatenated with last step's read vectors.\n",
    "        controller_input = tf.concat([x] + prev_read_vector_list, axis=1)\n",
    "        with tf.variable_scope('controller', reuse=self.reuse):\n",
    "            controller_output, controller_state = self.controller(controller_input, prev_controller_state)\n",
    "        # Per head: key k (memory_vector_dim), key strength beta (1), interpolation\n",
    "        # gate g (1), shift weighting s (2*shift_range+1), sharpening gamma (1).\n",
    "        num_parameters_per_head = self.memory_vector_dim + 1 + 1 + (self.shift_range * 2 + 1) + 1\n",
    "        num_heads = self.read_head_num + self.write_head_num\n",
    "        # Each write head additionally emits an erase and an add vector.\n",
    "        total_parameter_num = num_parameters_per_head * num_heads + self.memory_vector_dim * 2 * self.write_head_num\n",
    "        with tf.variable_scope('o2p', reuse=(self.step > 0) or self.reuse):\n",
    "            o2p_w = tf.get_variable('o2p_w', [controller_output.get_shape()[1], total_parameter_num],\n",
    "                                    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))\n",
    "            o2p_b = tf.get_variable('o2p_b', [total_parameter_num],\n",
    "                                    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))\n",
    "            parameters = tf.nn.xw_plus_b(controller_output, o2p_w, o2p_b)\n",
    "        head_parameter_list = tf.split(parameters[:, :num_parameters_per_head * num_heads], num_heads, axis=1)\n",
    "        erase_add_list = tf.split(parameters[:, num_parameters_per_head * num_heads:], 2 * self.write_head_num, axis=1)\n",
    "        prev_w_list = prev_state['w_list']\n",
    "        prev_M = prev_state['M']\n",
    "        w_list = []\n",
    "        p_list = []\n",
    "        for i, head_parameter in enumerate(head_parameter_list):\n",
    "            k = tf.tanh(head_parameter[:, 0:self.memory_vector_dim])\n",
    "            beta = tf.sigmoid(head_parameter[:, self.memory_vector_dim]) * 10   # key strength in (0, 10)\n",
    "            g = tf.sigmoid(head_parameter[:, self.memory_vector_dim + 1])\n",
    "            s = tf.nn.softmax(\n",
    "                head_parameter[:, self.memory_vector_dim + 2:self.memory_vector_dim + 2 + (self.shift_range * 2 + 1)])\n",
    "            # softplus(.) + 1 keeps the sharpening exponent >= 1\n",
    "            gamma = tf.log(tf.exp(head_parameter[:, -1]) + 1) + 1\n",
    "            with tf.variable_scope('addressing_head_%d' % i):\n",
    "                w = self.addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i])     # Figure 2\n",
    "            w_list.append(w)\n",
    "            p_list.append({'k': k, 'beta': beta, 'g': g, 's': s, 'gamma': gamma})\n",
    "        # Read: weighted sum of memory rows per read head.\n",
    "        read_w_list = w_list[:self.read_head_num]\n",
    "        read_vector_list = []\n",
    "        for i in range(self.read_head_num):\n",
    "            # axis= replaces the deprecated dim= keyword (same behavior).\n",
    "            read_vector = tf.reduce_sum(tf.expand_dims(read_w_list[i], axis=2) * prev_M, axis=1)\n",
    "            read_vector_list.append(read_vector)\n",
    "        # Write: erase then add, one pass per write head (Section 3.2 of the paper).\n",
    "        write_w_list = w_list[self.read_head_num:]\n",
    "        M = prev_M\n",
    "        for i in range(self.write_head_num):\n",
    "            w = tf.expand_dims(write_w_list[i], axis=2)\n",
    "            erase_vector = tf.expand_dims(tf.sigmoid(erase_add_list[i * 2]), axis=1)\n",
    "            add_vector = tf.expand_dims(tf.tanh(erase_add_list[i * 2 + 1]), axis=1)\n",
    "            M = M * (tf.ones(M.get_shape()) - tf.matmul(w, erase_vector)) + tf.matmul(w, add_vector)\n",
    "\n",
    "        if not self.output_dim:\n",
    "            output_dim = x.get_shape()[1]\n",
    "        else:\n",
    "            output_dim = self.output_dim\n",
    "        with tf.variable_scope('o2o', reuse=(self.step > 0) or self.reuse):\n",
    "            o2o_w = tf.get_variable('o2o_w', [controller_output.get_shape()[1], output_dim],\n",
    "                                    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))\n",
    "            o2o_b = tf.get_variable('o2o_b', [output_dim],\n",
    "                                    initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))\n",
    "            NTM_output = tf.nn.xw_plus_b(controller_output, o2o_w, o2o_b)\n",
    "        state = {\n",
    "            'controller_state': controller_state,\n",
    "            'read_vector_list': read_vector_list,\n",
    "            'w_list': w_list,\n",
    "            'p_list': p_list,\n",
    "            'M': M}\n",
    "        self.step += 1\n",
    "        return NTM_output, state\n",
    "\n",
    "    def addressing(self, k, beta, g, s, gamma, prev_M, prev_w):\n",
    "        '''Compute one head's attention weights over memory (Figure 2 of the paper).'''\n",
    "        # Content addressing: cosine similarity between key k and every memory row.\n",
    "        k = tf.expand_dims(k, axis=2)\n",
    "        inner_product = tf.matmul(prev_M, k)\n",
    "        k_norm = tf.sqrt(tf.reduce_sum(tf.square(k), axis=1, keep_dims=True))\n",
    "        M_norm = tf.sqrt(tf.reduce_sum(tf.square(prev_M), axis=2, keep_dims=True))\n",
    "        norm_product = M_norm * k_norm\n",
    "        # axis=2 pins the squeezed dimension; a bare squeeze would also drop the\n",
    "        # batch dimension whenever batch_size == 1.\n",
    "        K = tf.squeeze(inner_product / (norm_product + 1e-8), axis=2)\n",
    "        K_amplified = tf.exp(tf.expand_dims(beta, axis=1) * K)\n",
    "        w_c = K_amplified / tf.reduce_sum(K_amplified, axis=1, keep_dims=True)   # eq. (5)\n",
    "        if self.addressing_mode == 'content': \n",
    "            return w_c\n",
    "        # Location addressing: interpolate with the previous weights ...\n",
    "        g = tf.expand_dims(g, axis=1)\n",
    "        w_g = g * w_c + (1 - g) * prev_w   # eq. (7)\n",
    "        # ... apply the circular convolution with the shift weighting ...\n",
    "        s = tf.concat([s[:, :self.shift_range + 1],\n",
    "                       tf.zeros([s.get_shape()[0], self.memory_size - (self.shift_range * 2 + 1)]),\n",
    "                       s[:, -self.shift_range:]], axis=1)\n",
    "        t = tf.concat([tf.reverse(s, axis=[1]), tf.reverse(s, axis=[1])], axis=1)\n",
    "        s_matrix = tf.stack(\n",
    "            [t[:, self.memory_size - i - 1:self.memory_size * 2 - i - 1] for i in range(self.memory_size)],\n",
    "            axis=1)\n",
    "        w_ = tf.reduce_sum(tf.expand_dims(w_g, axis=1) * s_matrix, axis=2)   # eq. (8)\n",
    "        # ... then sharpen back to a focused distribution.\n",
    "        w_sharpen = tf.pow(w_, tf.expand_dims(gamma, axis=1))\n",
    "        w = w_sharpen / tf.reduce_sum(w_sharpen, axis=1, keep_dims=True)   # eq. (9)\n",
    "        return w\n",
    "    \n",
    "    def zero_state(self, batch_size, dtype):\n",
    "        '''Learned initial state, tiled across the batch.'''\n",
    "        def expand(x, dim, N):\n",
    "            # Tile a per-example tensor N times along a new leading dimension.\n",
    "            return tf.concat([tf.expand_dims(x, dim) for _ in range(N)], axis=dim)\n",
    "\n",
    "        with tf.variable_scope('init', reuse=self.reuse):\n",
    "            state = {\n",
    "                'controller_state': expand(tf.tanh(tf.get_variable('init_state', self.rnn_size,\n",
    "                                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))),\n",
    "                                  dim=0, N=batch_size),\n",
    "                'read_vector_list': [expand(tf.nn.softmax(tf.get_variable('init_r_%d' % i, [self.memory_vector_dim],\n",
    "                                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))),\n",
    "                                  dim=0, N=batch_size)\n",
    "                           for i in range(self.read_head_num)],\n",
    "                # Bug fix: was 'content_and_loaction' (typo), so the learned initial\n",
    "                # head weights were silently never used and the zeros branch always won.\n",
    "                'w_list': [expand(tf.nn.softmax(tf.get_variable('init_w_%d' % i, [self.memory_size],\n",
    "                                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))),\n",
    "                                  dim=0, N=batch_size) if self.addressing_mode == 'content_and_location'\n",
    "                           else tf.zeros([batch_size, self.memory_size])\n",
    "                           for i in range(self.read_head_num + self.write_head_num)],\n",
    "                'M': expand(tf.tanh(tf.get_variable('init_M', [self.memory_size, self.memory_vector_dim],\n",
    "                                            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))),\n",
    "                                  dim=0, N=batch_size)}\n",
    "            return state"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model:\n",
    "    '''Text classifier: embedding -> NTMCell unrolled over seq_len steps -> softmax.'''\n",
    "    def __init__(self, seq_len, size_layer, batch_size, dict_size,\n",
    "                 dimension_input, dimension_output, learning_rate, \n",
    "                 memory_size, memory_vector_size,\n",
    "                read_head_num=4, write_head_num=1):\n",
    "        # Token ids in, one-hot labels out.\n",
    "        self.X = tf.placeholder(tf.int32, [batch_size, seq_len])\n",
    "        self.Y = tf.placeholder(tf.float32, [batch_size, dimension_output])\n",
    "        encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, dimension_input], -1, 1))\n",
    "        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)\n",
    "        cell = NTMCell(size_layer, memory_size, memory_vector_size,\n",
    "                       read_head_num=read_head_num,\n",
    "                       write_head_num=write_head_num,\n",
    "                       addressing_mode='content_and_location',\n",
    "                       output_dim=dimension_output)\n",
    "        state = cell.zero_state(batch_size, tf.float32)\n",
    "        self.state_list = [state]\n",
    "        self.o = []\n",
    "        # Shared projection applied to the cell output at every timestep.\n",
    "        o2o_w = tf.Variable(tf.random_normal((dimension_output, dimension_output)))\n",
    "        o2o_b = tf.Variable(tf.random_normal([dimension_output]))\n",
    "        # Manually unroll the cell over the sequence (the NTM state is a dict,\n",
    "        # so tf.nn.dynamic_rnn cannot be used directly).\n",
    "        for t in range(seq_len):\n",
    "            output, state = cell(encoder_embedded[:,t,:], state)\n",
    "            output = tf.nn.xw_plus_b(output, o2o_w, o2o_b)\n",
    "            self.o.append(output)\n",
    "            self.state_list.append(state)\n",
    "        self.o = tf.stack(self.o, axis=1)\n",
    "        # Bug fix: a second state_list.append(state) was removed here -- the loop\n",
    "        # above already stores the final state as the last element.\n",
    "        self.logits = self.o[:,-1]  # classify from the last timestep only\n",
    "        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits, labels = self.Y))\n",
    "        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)\n",
    "        self.correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))\n",
    "        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters\n",
    "size_layer = 128   # controller RNN hidden size\n",
    "embedded_size = 128   # word embedding dimension\n",
    "dimension_output = len(trainset.target_names)\n",
    "learning_rate = 1e-3\n",
    "maxlen = 50   # sequences are clipped/padded to this many tokens\n",
    "batch_size = 32\n",
    "memory_size = 128   # number of NTM memory slots\n",
    "memory_vector_size = 40   # width of each memory slot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the graph in a fresh default graph and initialize all variables.\n",
    "tf.reset_default_graph()\n",
    "sess = tf.InteractiveSession()\n",
    "# vocabulary_size+4 leaves room for the GO/PAD/EOS/UNK special tokens.\n",
    "model = Model(maxlen,size_layer,batch_size,\n",
    "              vocabulary_size+4,embedded_size,\n",
    "              dimension_output,learning_rate,\n",
    "             memory_size, memory_vector_size)\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train with early stopping: give up after EARLY_STOPPING consecutive epochs\n",
    "# without an improvement in validation accuracy.\n",
    "EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0\n",
    "while True:\n",
    "    lasttime = time.time()\n",
    "    if CURRENT_CHECKPOINT == EARLY_STOPPING:\n",
    "        print('break epoch:%d\\n'%(EPOCH))\n",
    "        break\n",
    "        \n",
    "    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n",
    "    # Training pass; the ragged final batch is dropped so every feed matches\n",
    "    # the fixed batch_size placeholders. str_idx (utils) converts text to ids.\n",
    "    for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size):\n",
    "        batch_x = str_idx(train_X[i:i+batch_size],dictionary,maxlen)\n",
    "        acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer], \n",
    "                           feed_dict = {model.X : batch_x, model.Y : train_onehot[i:i+batch_size]})\n",
    "        train_loss += loss\n",
    "        train_acc += acc\n",
    "    \n",
    "    # Validation pass: optimizer op is not run, so weights stay fixed.\n",
    "    for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size):\n",
    "        batch_x = str_idx(test_X[i:i+batch_size],dictionary,maxlen)\n",
    "        acc, loss = sess.run([model.accuracy, model.cost], \n",
    "                           feed_dict = {model.X : batch_x, model.Y : test_onehot[i:i+batch_size]})\n",
    "        test_loss += loss\n",
    "        test_acc += acc\n",
    "    \n",
    "    # Average the accumulated per-batch metrics over the number of batches.\n",
    "    train_loss /= (len(train_X) // batch_size)\n",
    "    train_acc /= (len(train_X) // batch_size)\n",
    "    test_loss /= (len(test_X) // batch_size)\n",
    "    test_acc /= (len(test_X) // batch_size)\n",
    "    \n",
    "    # Early-stopping bookkeeping: reset the patience counter on improvement.\n",
    "    if test_acc > CURRENT_ACC:\n",
    "        print('epoch: %d, pass acc: %f, current acc: %f'%(EPOCH,CURRENT_ACC, test_acc))\n",
    "        CURRENT_ACC = test_acc\n",
    "        CURRENT_CHECKPOINT = 0\n",
    "    else:\n",
    "        CURRENT_CHECKPOINT += 1\n",
    "        \n",
    "    print('time taken:', time.time()-lasttime)\n",
    "    print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'%(EPOCH,train_loss,\n",
    "                                                                                          train_acc,test_loss,\n",
    "                                                                                          test_acc))\n",
    "    EPOCH += 1"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
