{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import sys, os, _pickle as pickle\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import nltk\n",
    "from sklearn.metrics import f1_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Paths are relative to the notebook's own directory; run from there.\n",
     "data_dir = '../data'\n",
     "ckpt_dir = '../checkpoint'\n",
     "word_embd_dir = '../checkpoint/word_embd'\n",
     "model_dir = '../checkpoint/modelv2'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Model hyper-parameters and fixed graph dimensions.\n",
     "word_embd_dim = 100\n",
     "pos_embd_dim = 25\n",
     "dep_embd_dim = 25\n",
     "# 400,000 pretrained word vectors + 1 slot for UNKNOWN_TOKEN (added in the\n",
     "# vocab cell below at index word_vocab_size - 1).\n",
     "word_vocab_size = 400001\n",
     "# ids 0-8 are coarse POS groups, 9 is the 'OTH' fallback (see pos_tag()).\n",
     "pos_vocab_size = 10\n",
     "# ids 0-19 are dependency types from file, 20 is the 'OTH' fallback.\n",
     "dep_vocab_size = 21\n",
     "relation_classes = 19\n",
     "word_state_size = 100\n",
     "other_state_size = 100\n",
     "batch_size = 10\n",
     "# channels = word, POS and dependency input channels per path.\n",
     "channels = 3\n",
     "lambda_l2 = 0.0001\n",
     "# Dependency paths are padded/truncated to this many tokens.\n",
     "max_len_path = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with tf.name_scope(\"input\"):\n",
    "    path_length = tf.placeholder(tf.int32, shape=[2, batch_size], name=\"path1_length\")\n",
    "    word_ids = tf.placeholder(tf.int32, shape=[2, batch_size, max_len_path], name=\"word_ids\")\n",
    "    pos_ids = tf.placeholder(tf.int32, [2, batch_size, max_len_path], name=\"pos_ids\")\n",
    "    dep_ids = tf.placeholder(tf.int32, [2, batch_size, max_len_path], name=\"dep_ids\")\n",
    "    y = tf.placeholder(tf.int32, [batch_size], name=\"y\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "with tf.name_scope(\"word_embedding\"):\n",
     "    # Word embeddings start as zeros and are loaded from a pretrained matrix\n",
     "    # via embedding_init / embedding_placeholder (see the commented cell below).\n",
     "    W = tf.Variable(tf.constant(0.0, shape=[word_vocab_size, word_embd_dim]), name=\"W\")\n",
     "    embedding_placeholder = tf.placeholder(tf.float32,[word_vocab_size, word_embd_dim])\n",
     "    embedding_init = W.assign(embedding_placeholder)\n",
     "    embedded_word = tf.nn.embedding_lookup(W, word_ids)\n",
     "    # Dedicated saver so the embedding matrix can be saved/restored on its own,\n",
     "    # keyed by its checkpoint name.\n",
     "    word_embedding_saver = tf.train.Saver({\"word_embedding/W\": W})\n",
     "\n",
     "with tf.name_scope(\"pos_embedding\"):\n",
     "    # NOTE: the Python name W is reused across the three scopes; only the most\n",
     "    # recent matrix stays bound to W, but the graph keeps all three variables.\n",
     "    W = tf.Variable(tf.random_uniform([pos_vocab_size, pos_embd_dim]), name=\"W\")\n",
     "    embedded_pos = tf.nn.embedding_lookup(W, pos_ids)\n",
     "    pos_embedding_saver = tf.train.Saver({\"pos_embedding/W\": W})\n",
     "\n",
     "with tf.name_scope(\"dep_embedding\"):\n",
     "    W = tf.Variable(tf.random_uniform([dep_vocab_size, dep_embd_dim]), name=\"W\")\n",
     "    embedded_dep = tf.nn.embedding_lookup(W, dep_ids)\n",
     "    dep_embedding_saver = tf.train.Saver({\"dep_embedding/W\": W})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Zero-filled initial LSTM states, shared by both paths (batch-major).\n",
     "word_hidden_state = tf.zeros([batch_size, word_state_size], name='word_hidden_state')\n",
     "word_cell_state = tf.zeros([batch_size, word_state_size], name='word_cell_state')\n",
     "word_init_state = tf.contrib.rnn.LSTMStateTuple(word_hidden_state, word_cell_state)\n",
     "\n",
     "# One (h, c) zero state per non-word channel: [0] for POS, [1] for dependency.\n",
     "other_hidden_states = tf.zeros([channels-1, batch_size, other_state_size], name=\"hidden_state\")\n",
     "other_cell_states = tf.zeros([channels-1, batch_size, other_state_size], name=\"cell_state\")\n",
     "\n",
     "other_init_states = [tf.contrib.rnn.LSTMStateTuple(other_hidden_states[i], other_cell_states[i]) for i in range(channels-1)]\n",
     "\n",
     "# Six independent LSTMs: one per channel (word / POS / dep) per path.  Each\n",
     "# runs over the padded path, then the outputs are max-pooled over the time\n",
     "# axis (axis=1) into a fixed-size vector per example.\n",
     "with tf.variable_scope(\"word_lstm1\"):\n",
     "    cell = tf.contrib.rnn.BasicLSTMCell(word_state_size)\n",
     "    state_series, current_state = tf.nn.dynamic_rnn(cell, embedded_word[0], sequence_length=path_length[0], initial_state=word_init_state)\n",
     "    state_series_word1 = tf.reduce_max(state_series, axis=1)\n",
     "\n",
     "with tf.variable_scope(\"word_lstm2\"):\n",
     "    cell = tf.contrib.rnn.BasicLSTMCell(word_state_size)\n",
     "    state_series, current_state = tf.nn.dynamic_rnn(cell, embedded_word[1], sequence_length=path_length[1], initial_state=word_init_state)\n",
     "    state_series_word2 = tf.reduce_max(state_series, axis=1)\n",
     "\n",
     "with tf.variable_scope(\"pos_lstm1\"):\n",
     "    cell = tf.contrib.rnn.BasicLSTMCell(other_state_size)\n",
     "    state_series, current_state = tf.nn.dynamic_rnn(cell, embedded_pos[0], sequence_length=path_length[0],initial_state=other_init_states[0])\n",
     "    state_series_pos1 = tf.reduce_max(state_series, axis=1)\n",
     "\n",
     "with tf.variable_scope(\"pos_lstm2\"):\n",
     "    cell = tf.contrib.rnn.BasicLSTMCell(other_state_size)\n",
     "    state_series, current_state = tf.nn.dynamic_rnn(cell, embedded_pos[1], sequence_length=path_length[1],initial_state=other_init_states[0])\n",
     "    state_series_pos2 = tf.reduce_max(state_series, axis=1)\n",
     "\n",
     "with tf.variable_scope(\"dep_lstm1\"):\n",
     "    cell = tf.contrib.rnn.BasicLSTMCell(other_state_size)\n",
     "    state_series, current_state = tf.nn.dynamic_rnn(cell, embedded_dep[0], sequence_length=path_length[0], initial_state=other_init_states[1])\n",
     "    state_series_dep1 = tf.reduce_max(state_series, axis=1)\n",
     "\n",
     "with tf.variable_scope(\"dep_lstm2\"):\n",
     "    cell = tf.contrib.rnn.BasicLSTMCell(other_state_size)\n",
     "    state_series, current_state = tf.nn.dynamic_rnn(cell, embedded_dep[1], sequence_length=path_length[1], initial_state=other_init_states[1])\n",
     "    state_series_dep2 = tf.reduce_max(state_series, axis=1)\n",
     "\n",
     "# Concatenate the three channel vectors per path, then both paths, giving a\n",
     "# [batch_size, 600] feature vector (6 LSTMs x 100 units).\n",
     "state_series1 = tf.concat([state_series_word1, state_series_pos1, state_series_dep1], 1)\n",
     "state_series2 = tf.concat([state_series_word2, state_series_pos2, state_series_dep2], 1)\n",
     "\n",
     "state_series = tf.concat([state_series1, state_series2], 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with tf.name_scope(\"hidden_layer\"):\n",
    "    W = tf.Variable(tf.truncated_normal([600, 100], -0.1, 0.1), name=\"W\")\n",
    "    b = tf.Variable(tf.zeros([100]), name=\"b\")\n",
    "    y_hidden_layer = tf.matmul(state_series, W) + b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with tf.name_scope(\"dropout\"):\n",
    "    h_drop = tf.nn.dropout(y_hidden_layer, 0.3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with tf.name_scope(\"softmax_layer\"):\n",
    "    W = tf.Variable(tf.truncated_normal([100, relation_classes], -0.1, 0.1), name=\"W\")\n",
    "    b = tf.Variable(tf.zeros([relation_classes]), name=\"b\")\n",
    "    logits = tf.matmul(y_hidden_layer, W) + b\n",
    "    predictions = tf.argmax(logits, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "tv_all = tf.trainable_variables()\n",
    "tv_regu = []\n",
    "non_reg = [\"word_embedding/W:0\",\"pos_embedding/W:0\",'dep_embedding/W:0',\"global_step:0\",'hidden_layer/b:0','softmax_layer/b:0']\n",
    "for t in tv_all:\n",
    "    if t.name not in non_reg:\n",
    "        if(t.name.find('biases')==-1):\n",
    "            tv_regu.append(t)\n",
    "\n",
    "with tf.name_scope(\"loss\"):\n",
    "    l2_loss = lambda_l2 * tf.reduce_sum([ tf.nn.l2_loss(v) for v in tv_regu ])\n",
    "    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y))\n",
    "    total_loss = loss + l2_loss\n",
    "\n",
    "global_step = tf.Variable(0, name=\"global_step\")\n",
    "\n",
    "optimizer = tf.train.AdamOptimizer(0.001).minimize(total_loss, global_step=global_step)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "f = open(data_dir + '/vocab.pkl', 'rb')\n",
    "vocab = pickle.load(f)\n",
    "f.close()\n",
    "\n",
    "word2id = dict((w, i) for i,w in enumerate(vocab))\n",
    "id2word = dict((i, w) for i,w in enumerate(vocab))\n",
    "\n",
    "unknown_token = \"UNKNOWN_TOKEN\"\n",
    "word2id[unknown_token] = word_vocab_size -1\n",
    "id2word[word_vocab_size-1] = unknown_token\n",
    "\n",
    "pos_tags_vocab = []\n",
    "for line in open(data_dir + '/pos_tags.txt'):\n",
    "        pos_tags_vocab.append(line.strip())\n",
    "\n",
    "dep_vocab = []\n",
    "for line in open(data_dir + '/dependency_types.txt'):\n",
    "    dep_vocab.append(line.strip())\n",
    "\n",
    "relation_vocab = []\n",
    "for line in open(data_dir + '/relation_types.txt'):\n",
    "    relation_vocab.append(line.strip())\n",
    "\n",
    "\n",
    "rel2id = dict((w, i) for i,w in enumerate(relation_vocab))\n",
    "id2rel = dict((i, w) for i,w in enumerate(relation_vocab))\n",
    "\n",
    "pos_tag2id = dict((w, i) for i,w in enumerate(pos_tags_vocab))\n",
    "id2pos_tag = dict((i, w) for i,w in enumerate(pos_tags_vocab))\n",
    "\n",
    "dep2id = dict((w, i) for i,w in enumerate(dep_vocab))\n",
    "id2dep = dict((i, w) for i,w in enumerate(dep_vocab))\n",
    "\n",
    "pos_tag2id['OTH'] = 9\n",
    "id2pos_tag[9] = 'OTH'\n",
    "\n",
    "dep2id['OTH'] = 20\n",
    "id2dep[20] = 'OTH'\n",
    "\n",
    "JJ_pos_tags = ['JJ', 'JJR', 'JJS']\n",
    "NN_pos_tags = ['NN', 'NNS', 'NNP', 'NNPS']\n",
    "RB_pos_tags = ['RB', 'RBR', 'RBS']\n",
    "PRP_pos_tags = ['PRP', 'PRP$']\n",
    "VB_pos_tags = ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\n",
    "_pos_tags = ['CC', 'CD', 'DT', 'IN']\n",
    "\n",
    "def pos_tag(x):\n",
    "    if x in JJ_pos_tags:\n",
    "        return pos_tag2id['JJ']\n",
    "    if x in NN_pos_tags:\n",
    "        return pos_tag2id['NN']\n",
    "    if x in RB_pos_tags:\n",
    "        return pos_tag2id['RB']\n",
    "    if x in PRP_pos_tags:\n",
    "        return pos_tag2id['PRP']\n",
    "    if x in VB_pos_tags:\n",
    "        return pos_tag2id['VB']\n",
    "    if x in _pos_tags:\n",
    "        return pos_tag2id[x]\n",
    "    else:\n",
    "        return 9"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Load the precomputed dependency-path features for the training set.\n",
     "f = open(data_dir + '/train_paths', 'rb')\n",
     "word_p1, word_p2, dep_p1, dep_p2, pos_p1, pos_p2 = pickle.load(f)\n",
     "f.close()\n",
     "\n",
     "# Second whitespace-separated field of each line is the relation label.\n",
     "relations = []\n",
     "for line in open(data_dir + '/train_relations.txt'):\n",
     "    relations.append(line.strip().split()[1])\n",
     "\n",
     "length = len(word_p1)\n",
     "num_batches = int(length/batch_size)\n",
     "\n",
     "# Lower-case words and map OOV words / unseen dependency labels to their\n",
     "# fallback buckets (UNKNOWN_TOKEN / 'OTH') in place, before id conversion.\n",
     "for i in range(length):\n",
     "    for j, word in enumerate(word_p1[i]):\n",
     "        word = word.lower()\n",
     "        word_p1[i][j] = word if word in word2id else unknown_token \n",
     "    for k, word in enumerate(word_p2[i]):\n",
     "        word = word.lower()\n",
     "        word_p2[i][k] = word if word in word2id else unknown_token \n",
     "    for l, d in enumerate(dep_p1[i]):\n",
     "        dep_p1[i][l] = d if d in dep2id else 'OTH'\n",
     "    for m, d in enumerate(dep_p2[i]):\n",
     "        dep_p2[i][m] = d if d in dep2id else 'OTH'\n",
     "\n",
     "# Id matrices are initialised with np.ones, so padded positions carry id 1.\n",
     "word_p1_ids = np.ones([length, max_len_path],dtype=int)\n",
     "word_p2_ids = np.ones([length, max_len_path],dtype=int)\n",
     "pos_p1_ids = np.ones([length, max_len_path],dtype=int)\n",
     "pos_p2_ids = np.ones([length, max_len_path],dtype=int)\n",
     "dep_p1_ids = np.ones([length, max_len_path],dtype=int)\n",
     "dep_p2_ids = np.ones([length, max_len_path],dtype=int)\n",
     "rel_ids = np.array([rel2id[rel] for rel in relations])\n",
    "path1_len = np.array([len(w) for w in word_p1], dtype=int)\n",
    "path2_len = np.array([len(w) for w in word_p2])\n",
    "\n",
     "# Convert tokens / POS tags / dependency labels to integer id matrices;\n",
     "# positions past each path's length keep the initial padding id 1.\n",
     "for i in range(length):\n",
     "    for j, w in enumerate(word_p1[i]):\n",
     "        word_p1_ids[i][j] = word2id[w]\n",
     "    for j, w in enumerate(word_p2[i]):\n",
     "        word_p2_ids[i][j] = word2id[w]\n",
     "    for j, w in enumerate(pos_p1[i]):\n",
     "        pos_p1_ids[i][j] = pos_tag(w)\n",
     "    for j, w in enumerate(pos_p2[i]):\n",
     "        pos_p2_ids[i][j] = pos_tag(w)\n",
     "    for j, w in enumerate(dep_p1[i]):\n",
     "        dep_p1_ids[i][j] = dep2id[w]\n",
     "    for j, w in enumerate(dep_p2[i]):\n",
     "        dep_p2_ids[i][j] = dep2id[w]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "sess = tf.Session()\n",
     "sess.run(tf.global_variables_initializer())\n",
     "# Saver over all variables, used for full-model checkpoints.\n",
     "saver = tf.train.Saver()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# f = open('data/word_embedding', 'rb')\n",
    "# word_embedding = pickle.load(f)\n",
    "# f.close()\n",
    "\n",
    "# sess.run(embedding_init, feed_dict={embedding_placeholder:word_embedding})\n",
    "# word_embedding_saver.save(sess, word_embd_dir + '/word_embd')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# model = tf.train.latest_checkpoint(model_dir)\n",
    "# saver.restore(sess, model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from checkpoint/word_embd/word_embd\n"
     ]
    }
   ],
   "source": [
     "# Restore the pretrained word-embedding matrix saved to word_embd_dir\n",
     "# (overwrites the zero-initialised word_embedding/W).\n",
     "latest_embd = tf.train.latest_checkpoint(word_embd_dir)\n",
     "word_embedding_saver.restore(sess, latest_embd)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step: 10 loss: 2.88023\n",
      "Step: 20 loss: 2.82584\n",
      "Step: 30 loss: 2.73288\n",
      "Step: 40 loss: 2.33498\n",
      "Step: 50 loss: 2.74904\n",
      "Step: 60 loss: 2.57171\n",
      "Step: 70 loss: 2.68678\n",
      "Step: 80 loss: 2.43279\n",
      "Step: 90 loss: 2.67284\n",
      "Step: 100 loss: 2.54792\n",
      "Step: 110 loss: 2.44991\n",
      "Step: 120 loss: 2.57479\n",
      "Step: 130 loss: 2.06215\n",
      "Step: 140 loss: 2.14534\n",
      "Step: 150 loss: 2.4418\n",
      "Step: 160 loss: 1.8834\n",
      "Step: 170 loss: 1.94869\n",
      "Step: 180 loss: 1.85433\n",
      "Step: 190 loss: 1.61863\n",
      "Step: 200 loss: 2.24159\n",
      "Step: 210 loss: 1.67084\n",
      "Step: 220 loss: 2.09018\n",
      "Step: 230 loss: 2.24812\n",
      "Step: 240 loss: 1.45496\n",
      "Step: 250 loss: 1.95246\n",
      "Step: 260 loss: 1.19583\n",
      "Step: 270 loss: 1.57627\n",
      "Step: 280 loss: 1.42064\n",
      "Step: 290 loss: 1.92385\n",
      "Step: 300 loss: 1.79492\n",
      "Step: 310 loss: 1.65186\n",
      "Step: 320 loss: 1.55759\n",
      "Step: 330 loss: 1.71198\n",
      "Step: 340 loss: 1.32262\n",
      "Step: 350 loss: 1.14047\n",
      "Step: 360 loss: 1.51696\n",
      "Step: 370 loss: 1.54455\n",
      "Step: 380 loss: 1.06301\n",
      "Step: 390 loss: 1.69749\n",
      "Step: 400 loss: 1.87595\n",
      "Step: 410 loss: 1.63556\n",
      "Step: 420 loss: 1.2436\n",
      "Step: 430 loss: 0.793735\n",
      "Step: 440 loss: 0.892609\n",
      "Step: 450 loss: 1.09287\n",
      "Step: 460 loss: 1.03504\n",
      "Step: 470 loss: 0.738219\n",
      "Step: 480 loss: 1.06359\n",
      "Step: 490 loss: 1.51849\n",
      "Step: 500 loss: 0.793486\n",
      "Step: 510 loss: 0.64094\n",
      "Step: 520 loss: 0.656823\n",
      "Step: 530 loss: 0.842154\n",
      "Step: 540 loss: 0.521849\n",
      "Step: 550 loss: 0.874004\n",
      "Step: 560 loss: 0.818532\n",
      "Step: 570 loss: 0.720943\n",
      "Step: 580 loss: 0.360149\n",
      "Step: 590 loss: 0.588474\n",
      "Step: 600 loss: 0.250631\n",
      "Step: 610 loss: 0.515828\n",
      "Step: 620 loss: 1.06398\n",
      "Step: 630 loss: 1.49793\n",
      "Step: 640 loss: 0.924053\n",
      "Step: 650 loss: 0.691453\n",
      "Step: 660 loss: 0.960509\n",
      "Step: 670 loss: 0.97871\n",
      "Step: 680 loss: 0.182004\n",
      "Step: 690 loss: 0.756514\n",
      "Step: 700 loss: 0.988201\n",
      "Step: 710 loss: 0.915829\n",
      "Step: 720 loss: 0.861252\n",
      "Step: 730 loss: 0.84051\n",
      "Step: 740 loss: 0.895424\n",
      "Step: 750 loss: 1.12444\n",
      "Step: 760 loss: 0.727627\n",
      "Step: 770 loss: 0.800222\n",
      "Step: 780 loss: 0.265388\n",
      "Step: 790 loss: 0.910404\n",
      "Step: 800 loss: 0.488359\n",
      "Step: 810 loss: 0.950841\n",
      "Step: 820 loss: 0.4143\n",
      "Step: 830 loss: 0.579588\n",
      "Step: 840 loss: 0.677927\n",
      "Step: 850 loss: 1.40732\n",
      "Step: 860 loss: 1.13621\n",
      "Step: 870 loss: 0.42374\n",
      "Step: 880 loss: 0.571424\n",
      "Step: 890 loss: 0.520662\n",
      "Step: 900 loss: 0.512721\n",
      "Step: 910 loss: 0.152484\n",
      "Step: 920 loss: 0.618815\n",
      "Step: 930 loss: 0.509155\n",
      "Step: 940 loss: 0.193266\n",
      "Step: 950 loss: 0.219956\n",
      "Step: 960 loss: 0.357899\n",
      "Step: 970 loss: 0.454869\n",
      "Step: 980 loss: 0.447206\n",
      "Step: 990 loss: 0.274612\n",
      "Step: 1000 loss: 0.462686\n",
      "Saved Model\n",
      "Step: 1010 loss: 0.530896\n",
      "Step: 1020 loss: 0.298706\n",
      "Step: 1030 loss: 0.463\n",
      "Step: 1040 loss: 0.282751\n",
      "Step: 1050 loss: 0.28275\n",
      "Step: 1060 loss: 0.156807\n",
      "Step: 1070 loss: 0.262681\n",
      "Step: 1080 loss: 0.448407\n",
      "Step: 1090 loss: 0.46092\n",
      "Step: 1100 loss: 0.214223\n",
      "Step: 1110 loss: 0.2769\n",
      "Step: 1120 loss: 0.17235\n",
      "Step: 1130 loss: 0.140862\n",
      "Step: 1140 loss: 0.398407\n",
      "Step: 1150 loss: 0.194407\n",
      "Step: 1160 loss: 0.255582\n",
      "Step: 1170 loss: 0.204024\n",
      "Step: 1180 loss: 0.138213\n",
      "Step: 1190 loss: 0.164386\n",
      "Step: 1200 loss: 0.279604\n",
      "Step: 1210 loss: 0.57749\n",
      "Step: 1220 loss: 0.13971\n",
      "Step: 1230 loss: 0.407656\n",
      "Step: 1240 loss: 0.146737\n",
      "Step: 1250 loss: 0.18383\n",
      "Step: 1260 loss: 0.464973\n",
      "Step: 1270 loss: 0.119731\n",
      "Step: 1280 loss: 0.282338\n",
      "Step: 1290 loss: 0.16192\n",
      "Step: 1300 loss: 0.375285\n",
      "Step: 1310 loss: 0.226231\n",
      "Step: 1320 loss: 0.290858\n",
      "Step: 1330 loss: 0.267802\n",
      "Step: 1340 loss: 0.124168\n",
      "Step: 1350 loss: 0.153703\n",
      "Step: 1360 loss: 0.284834\n",
      "Step: 1370 loss: 0.199283\n",
      "Step: 1380 loss: 0.249811\n",
      "Step: 1390 loss: 0.154034\n",
      "Step: 1400 loss: 0.548356\n",
      "Step: 1410 loss: 0.180473\n",
      "Step: 1420 loss: 0.15353\n",
      "Step: 1430 loss: 0.140247\n",
      "Step: 1440 loss: 0.257908\n",
      "Step: 1450 loss: 0.157383\n",
      "Step: 1460 loss: 0.136053\n",
      "Step: 1470 loss: 0.116849\n",
      "Step: 1480 loss: 0.190823\n",
      "Step: 1490 loss: 0.126334\n",
      "Step: 1500 loss: 0.122665\n",
      "Step: 1510 loss: 0.413359\n",
      "Step: 1520 loss: 0.163194\n",
      "Step: 1530 loss: 0.263992\n",
      "Step: 1540 loss: 0.11905\n",
      "Step: 1550 loss: 0.148155\n",
      "Step: 1560 loss: 0.0989128\n",
      "Step: 1570 loss: 0.158463\n",
      "Step: 1580 loss: 0.158307\n",
      "Step: 1590 loss: 0.129369\n",
      "Step: 1600 loss: 0.154085\n",
      "Step: 1610 loss: 0.11863\n",
      "Step: 1620 loss: 0.106599\n",
      "Step: 1630 loss: 0.133941\n",
      "Step: 1640 loss: 0.117655\n",
      "Step: 1650 loss: 0.270219\n",
      "Step: 1660 loss: 0.115813\n",
      "Step: 1670 loss: 0.122772\n",
      "Step: 1680 loss: 0.109636\n",
      "Step: 1690 loss: 0.106271\n",
      "Step: 1700 loss: 0.101707\n",
      "Step: 1710 loss: 0.0875114\n",
      "Step: 1720 loss: 0.108265\n",
      "Step: 1730 loss: 0.135553\n",
      "Step: 1740 loss: 0.101356\n",
      "Step: 1750 loss: 0.10563\n",
      "Step: 1760 loss: 0.107229\n",
      "Step: 1770 loss: 0.121855\n",
      "Step: 1780 loss: 0.0905529\n",
      "Step: 1790 loss: 0.092672\n",
      "Step: 1800 loss: 0.118814\n",
      "Step: 1810 loss: 0.0905313\n",
      "Step: 1820 loss: 0.106164\n",
      "Step: 1830 loss: 0.0868765\n",
      "Step: 1840 loss: 0.0996025\n",
      "Step: 1850 loss: 0.0887553\n",
      "Step: 1860 loss: 0.0992355\n",
      "Step: 1870 loss: 0.0965959\n",
      "Step: 1880 loss: 0.0900526\n",
      "Step: 1890 loss: 0.0830978\n",
      "Step: 1900 loss: 0.0892735\n",
      "Step: 1910 loss: 0.10914\n",
      "Step: 1920 loss: 0.143269\n",
      "Step: 1930 loss: 0.107885\n",
      "Step: 1940 loss: 0.16154\n",
      "Step: 1950 loss: 0.0882204\n",
      "Step: 1960 loss: 0.208128\n",
      "Step: 1970 loss: 0.0929074\n",
      "Step: 1980 loss: 0.0935487\n",
      "Step: 1990 loss: 0.0984242\n",
      "Step: 2000 loss: 0.0991593\n",
      "Saved Model\n",
      "Step: 2010 loss: 0.135896\n",
      "Step: 2020 loss: 0.0951169\n",
      "Step: 2030 loss: 0.0930211\n",
      "Step: 2040 loss: 0.0828225\n",
      "Step: 2050 loss: 0.0845582\n",
      "Step: 2060 loss: 0.0936542\n",
      "Step: 2070 loss: 0.087472\n",
      "Step: 2080 loss: 0.1761\n",
      "Step: 2090 loss: 0.082662\n",
      "Step: 2100 loss: 0.0970386\n",
      "Step: 2110 loss: 0.0875647\n",
      "Step: 2120 loss: 0.0850562\n",
      "Step: 2130 loss: 0.0830852\n",
      "Step: 2140 loss: 0.0887249\n",
      "Step: 2150 loss: 0.0897888\n",
      "Step: 2160 loss: 0.0869396\n",
      "Step: 2170 loss: 0.123164\n",
      "Step: 2180 loss: 0.0791501\n",
      "Step: 2190 loss: 0.0801781\n",
      "Step: 2200 loss: 0.0902712\n",
      "Step: 2210 loss: 0.0997826\n",
      "Step: 2220 loss: 0.0803998\n",
      "Step: 2230 loss: 0.077604\n",
      "Step: 2240 loss: 0.102242\n",
      "Step: 2250 loss: 0.101963\n",
      "Step: 2260 loss: 0.0998759\n",
      "Step: 2270 loss: 0.0910025\n",
      "Step: 2280 loss: 0.0757864\n",
      "Step: 2290 loss: 0.0785011\n",
      "Step: 2300 loss: 0.078705\n",
      "Step: 2310 loss: 0.0916113\n",
      "Step: 2320 loss: 0.104755\n",
      "Step: 2330 loss: 0.0845038\n",
      "Step: 2340 loss: 0.0825501\n",
      "Step: 2350 loss: 0.0981562\n",
      "Step: 2360 loss: 0.091279\n",
      "Step: 2370 loss: 0.0782879\n",
      "Step: 2380 loss: 0.0802806\n",
      "Step: 2390 loss: 0.0870144\n",
      "Step: 2400 loss: 0.0849014\n",
      "Step: 2410 loss: 0.0862748\n",
      "Step: 2420 loss: 0.0830385\n",
      "Step: 2430 loss: 0.0743998\n",
      "Step: 2440 loss: 0.0804079\n",
      "Step: 2450 loss: 0.0842353\n",
      "Step: 2460 loss: 0.0841687\n",
      "Step: 2470 loss: 0.0833311\n",
      "Step: 2480 loss: 0.0806125\n",
      "Step: 2490 loss: 0.0728182\n",
      "Step: 2500 loss: 0.0749073\n",
      "Step: 2510 loss: 0.0826365\n",
      "Step: 2520 loss: 0.0822583\n",
      "Step: 2530 loss: 0.0808448\n",
      "Step: 2540 loss: 0.084984\n",
      "Step: 2550 loss: 0.0723713\n",
      "Step: 2560 loss: 0.0810138\n",
      "Step: 2570 loss: 0.080957\n",
      "Step: 2580 loss: 0.0903101\n",
      "Step: 2590 loss: 0.0780246\n",
      "Step: 2600 loss: 0.0723606\n",
      "Step: 2610 loss: 0.0727261\n",
      "Step: 2620 loss: 0.0720309\n",
      "Step: 2630 loss: 0.0874925\n",
      "Step: 2640 loss: 0.0721527\n",
      "Step: 2650 loss: 0.0710271\n",
      "Step: 2660 loss: 0.0705722\n",
      "Step: 2670 loss: 0.0712357\n",
      "Step: 2680 loss: 0.0765828\n",
      "Step: 2690 loss: 0.0764556\n",
      "Step: 2700 loss: 0.0769606\n",
      "Step: 2710 loss: 0.0764759\n",
      "Step: 2720 loss: 0.0846907\n",
      "Step: 2730 loss: 0.0710521\n",
      "Step: 2740 loss: 0.0761884\n",
      "Step: 2750 loss: 0.0748823\n",
      "Step: 2760 loss: 0.0728616\n",
      "Step: 2770 loss: 0.0703158\n",
      "Step: 2780 loss: 0.0747337\n",
      "Step: 2790 loss: 0.0709759\n",
      "Step: 2800 loss: 0.0736991\n",
      "Step: 2810 loss: 0.0705774\n",
      "Step: 2820 loss: 0.0696915\n",
      "Step: 2830 loss: 0.0725464\n",
      "Step: 2840 loss: 0.068646\n",
      "Step: 2850 loss: 0.0871379\n",
      "Step: 2860 loss: 0.0728063\n",
      "Step: 2870 loss: 0.10615\n",
      "Step: 2880 loss: 0.0770969\n",
      "Step: 2890 loss: 0.0751925\n",
      "Step: 2900 loss: 0.0724477\n",
      "Step: 2910 loss: 0.0726863\n",
      "Step: 2920 loss: 0.071429\n",
      "Step: 2930 loss: 0.0704034\n",
      "Step: 2940 loss: 0.071196\n",
      "Step: 2950 loss: 0.0689573\n",
      "Step: 2960 loss: 0.0717139\n",
      "Step: 2970 loss: 0.0720054\n",
      "Step: 2980 loss: 0.0710371\n",
      "Step: 2990 loss: 0.0690059\n",
      "Step: 3000 loss: 0.0707356\n",
      "Saved Model\n",
      "Step: 3010 loss: 0.0754333\n",
      "Step: 3020 loss: 0.0741854\n",
      "Step: 3030 loss: 0.0669365\n",
      "Step: 3040 loss: 0.068253\n",
      "Step: 3050 loss: 0.0791157\n",
      "Step: 3060 loss: 0.066303\n",
      "Step: 3070 loss: 0.0674031\n",
      "Step: 3080 loss: 0.07157\n",
      "Step: 3090 loss: 0.0670253\n",
      "Step: 3100 loss: 0.0744858\n",
      "Step: 3110 loss: 0.0700279\n",
      "Step: 3120 loss: 0.0675299\n",
      "Step: 3130 loss: 0.068519\n",
      "Step: 3140 loss: 0.068502\n",
      "Step: 3150 loss: 0.0655795\n",
      "Step: 3160 loss: 0.0643536\n",
      "Step: 3170 loss: 0.0681256\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step: 3180 loss: 0.065979\n",
      "Step: 3190 loss: 0.0704952\n",
      "Step: 3200 loss: 0.0683691\n",
      "Step: 3210 loss: 0.0640325\n",
      "Step: 3220 loss: 0.0635774\n",
      "Step: 3230 loss: 0.064269\n",
      "Step: 3240 loss: 0.0666034\n",
      "Step: 3250 loss: 0.0615001\n",
      "Step: 3260 loss: 0.0647621\n",
      "Step: 3270 loss: 0.0647485\n",
      "Step: 3280 loss: 0.0636391\n",
      "Step: 3290 loss: 0.0610651\n",
      "Step: 3300 loss: 0.0624499\n",
      "Step: 3310 loss: 0.0607444\n",
      "Step: 3320 loss: 0.0616932\n",
      "Step: 3330 loss: 0.0648961\n",
      "Step: 3340 loss: 0.0689047\n",
      "Step: 3350 loss: 0.0627339\n",
      "Step: 3360 loss: 0.0629497\n",
      "Step: 3370 loss: 0.0621283\n",
      "Step: 3380 loss: 0.0657708\n",
      "Step: 3390 loss: 0.0596765\n",
      "Step: 3400 loss: 0.0620293\n",
      "Step: 3410 loss: 0.0671486\n",
      "Step: 3420 loss: 0.0617691\n",
      "Step: 3430 loss: 0.0637014\n",
      "Step: 3440 loss: 0.0640508\n",
      "Step: 3450 loss: 0.060757\n",
      "Step: 3460 loss: 0.0642266\n",
      "Step: 3470 loss: 0.063313\n",
      "Step: 3480 loss: 0.0618947\n",
      "Step: 3490 loss: 0.0613686\n",
      "Step: 3500 loss: 0.0637272\n",
      "Step: 3510 loss: 0.0628268\n",
      "Step: 3520 loss: 0.0654906\n",
      "Step: 3530 loss: 0.0591994\n",
      "Step: 3540 loss: 0.060334\n",
      "Step: 3550 loss: 0.064682\n",
      "Step: 3560 loss: 0.0681084\n",
      "Step: 3570 loss: 0.0644505\n",
      "Step: 3580 loss: 0.060167\n",
      "Step: 3590 loss: 0.0604309\n",
      "Step: 3600 loss: 0.0595665\n",
      "Step: 3610 loss: 0.0594595\n",
      "Step: 3620 loss: 0.0567945\n",
      "Step: 3630 loss: 0.059069\n",
      "Step: 3640 loss: 0.056851\n",
      "Step: 3650 loss: 0.0574592\n",
      "Step: 3660 loss: 0.0571817\n",
      "Step: 3670 loss: 0.062598\n",
      "Step: 3680 loss: 0.058318\n",
      "Step: 3690 loss: 0.0580298\n",
      "Step: 3700 loss: 0.0570116\n",
      "Step: 3710 loss: 0.0602378\n",
      "Step: 3720 loss: 0.0611698\n",
      "Step: 3730 loss: 0.0566287\n",
      "Step: 3740 loss: 0.06201\n",
      "Step: 3750 loss: 0.0568883\n",
      "Step: 3760 loss: 0.0594586\n",
      "Step: 3770 loss: 0.058497\n",
      "Step: 3780 loss: 0.0584922\n",
      "Step: 3790 loss: 0.0678336\n",
      "Step: 3800 loss: 0.0598645\n",
      "Step: 3810 loss: 0.0560785\n",
      "Step: 3820 loss: 0.0583031\n",
      "Step: 3830 loss: 0.0551893\n",
      "Step: 3840 loss: 0.0550618\n",
      "Step: 3850 loss: 0.0619521\n",
      "Step: 3860 loss: 0.0543501\n",
      "Step: 3870 loss: 0.0555735\n",
      "Step: 3880 loss: 0.0564566\n",
      "Step: 3890 loss: 0.0546295\n",
      "Step: 3900 loss: 0.0562974\n",
      "Step: 3910 loss: 0.056362\n",
      "Step: 3920 loss: 0.0617628\n",
      "Step: 3930 loss: 0.0536657\n",
      "Step: 3940 loss: 0.0604713\n",
      "Step: 3950 loss: 0.0545483\n",
      "Step: 3960 loss: 0.0547408\n",
      "Step: 3970 loss: 0.0573198\n",
      "Step: 3980 loss: 0.0536717\n",
      "Step: 3990 loss: 0.0557231\n",
      "Step: 4000 loss: 0.0539054\n",
      "Saved Model\n",
      "Step: 4010 loss: 0.0548477\n",
      "Step: 4020 loss: 0.0555863\n",
      "Step: 4030 loss: 0.0532781\n",
      "Step: 4040 loss: 0.0582686\n",
      "Step: 4050 loss: 0.0525659\n",
      "Step: 4060 loss: 0.0556034\n",
      "Step: 4070 loss: 0.0554519\n",
      "Step: 4080 loss: 0.0576891\n",
      "Step: 4090 loss: 0.0551396\n",
      "Step: 4100 loss: 0.0529471\n",
      "Step: 4110 loss: 0.0589812\n",
      "Step: 4120 loss: 0.0537085\n",
      "Step: 4130 loss: 0.0513592\n",
      "Step: 4140 loss: 0.0536881\n",
      "Step: 4150 loss: 0.0547232\n",
      "Step: 4160 loss: 0.0532422\n",
      "Step: 4170 loss: 0.0513902\n",
      "Step: 4180 loss: 0.0508279\n",
      "Step: 4190 loss: 0.0528942\n",
      "Step: 4200 loss: 0.0523727\n",
      "Step: 4210 loss: 0.0507534\n",
      "Step: 4220 loss: 0.052219\n",
      "Step: 4230 loss: 0.0563694\n",
      "Step: 4240 loss: 0.0576466\n",
      "Step: 4250 loss: 0.0511018\n",
      "Step: 4260 loss: 0.0568422\n",
      "Step: 4270 loss: 0.0503615\n",
      "Step: 4280 loss: 0.0531739\n",
      "Step: 4290 loss: 0.0510653\n",
      "Step: 4300 loss: 0.0524962\n",
      "Step: 4310 loss: 0.0550859\n",
      "Step: 4320 loss: 0.0508944\n",
      "Step: 4330 loss: 0.0497293\n",
      "Step: 4340 loss: 0.0513726\n",
      "Step: 4350 loss: 0.0506055\n",
      "Step: 4360 loss: 0.0532403\n",
      "Step: 4370 loss: 0.0503815\n",
      "Step: 4380 loss: 0.0493514\n",
      "Step: 4390 loss: 0.0495595\n",
      "Step: 4400 loss: 0.048156\n",
      "Step: 4410 loss: 0.0488454\n",
      "Step: 4420 loss: 0.0481244\n",
      "Step: 4430 loss: 0.0487288\n",
      "Step: 4440 loss: 0.0517458\n",
      "Step: 4450 loss: 0.0485546\n",
      "Step: 4460 loss: 0.048365\n",
      "Step: 4470 loss: 0.0492517\n",
      "Step: 4480 loss: 0.0493081\n",
      "Step: 4490 loss: 0.0467083\n",
      "Step: 4500 loss: 0.0473378\n",
      "Step: 4510 loss: 0.0501069\n",
      "Step: 4520 loss: 0.0485846\n",
      "Step: 4530 loss: 0.0470563\n",
      "Step: 4540 loss: 0.0467402\n",
      "Step: 4550 loss: 0.0478324\n",
      "Step: 4560 loss: 0.0455632\n",
      "Step: 4570 loss: 0.0477886\n",
      "Step: 4580 loss: 0.0498031\n",
      "Step: 4590 loss: 0.0467187\n",
      "Step: 4600 loss: 0.0451887\n",
      "Step: 4610 loss: 0.046056\n",
      "Step: 4620 loss: 0.0509968\n",
      "Step: 4630 loss: 0.0479054\n",
      "Step: 4640 loss: 0.0486915\n",
      "Step: 4650 loss: 0.0499455\n",
      "Step: 4660 loss: 0.0484608\n",
      "Step: 4670 loss: 0.0453398\n",
      "Step: 4680 loss: 0.045155\n",
      "Step: 4690 loss: 0.0453238\n",
      "Step: 4700 loss: 0.0455396\n",
      "Step: 4710 loss: 0.0483179\n",
      "Step: 4720 loss: 0.0490627\n",
      "Step: 4730 loss: 0.0462821\n",
      "Step: 4740 loss: 0.0471759\n",
      "Step: 4750 loss: 0.0439089\n",
      "Step: 4760 loss: 0.0470515\n",
      "Step: 4770 loss: 0.0446155\n",
      "Step: 4780 loss: 0.0446368\n",
      "Step: 4790 loss: 0.0556418\n",
      "Step: 4800 loss: 0.0435886\n",
      "Step: 4810 loss: 0.0449878\n",
      "Step: 4820 loss: 0.0430764\n",
      "Step: 4830 loss: 0.0436248\n",
      "Step: 4840 loss: 0.043485\n",
      "Step: 4850 loss: 0.0442026\n",
      "Step: 4860 loss: 0.0444389\n",
      "Step: 4870 loss: 0.0434672\n",
      "Step: 4880 loss: 0.0480837\n",
      "Step: 4890 loss: 0.0429426\n",
      "Step: 4900 loss: 0.0430241\n",
      "Step: 4910 loss: 0.0439857\n",
      "Step: 4920 loss: 0.0451498\n",
      "Step: 4930 loss: 0.0429454\n",
      "Step: 4940 loss: 0.0418745\n",
      "Step: 4950 loss: 0.0451776\n",
      "Step: 4960 loss: 0.045006\n",
      "Step: 4970 loss: 0.0453885\n",
      "Step: 4980 loss: 0.0420052\n",
      "Step: 4990 loss: 0.040756\n",
      "Step: 5000 loss: 0.0410346\n",
      "Saved Model\n",
      "Step: 5010 loss: 0.0414333\n",
      "Step: 5020 loss: 0.0437712\n",
      "Step: 5030 loss: 0.0481942\n",
      "Step: 5040 loss: 0.0423514\n",
      "Step: 5050 loss: 0.0419343\n",
      "Step: 5060 loss: 0.0449574\n",
      "Step: 5070 loss: 0.0445536\n",
      "Step: 5080 loss: 0.0403328\n",
      "Step: 5090 loss: 0.0414398\n",
      "Step: 5100 loss: 0.0430326\n",
      "Step: 5110 loss: 0.0418498\n",
      "Step: 5120 loss: 0.0426276\n",
      "Step: 5130 loss: 0.042384\n",
      "Step: 5140 loss: 0.0389891\n",
      "Step: 5150 loss: 0.0409798\n",
      "Step: 5160 loss: 0.0419294\n",
      "Step: 5170 loss: 0.0414362\n",
      "Step: 5180 loss: 0.0429289\n",
      "Step: 5190 loss: 0.0424648\n",
      "Step: 5200 loss: 0.0386643\n",
      "Step: 5210 loss: 0.0398064\n",
      "Step: 5220 loss: 0.0419937\n",
      "Step: 5230 loss: 0.0432859\n",
      "Step: 5240 loss: 0.0393222\n",
      "Step: 5250 loss: 0.0425956\n",
      "Step: 5260 loss: 0.0383359\n",
      "Step: 5270 loss: 0.0392411\n",
      "Step: 5280 loss: 0.0396958\n",
      "Step: 5290 loss: 0.043573\n",
      "Step: 5300 loss: 0.0400306\n",
      "Step: 5310 loss: 0.0379942\n",
      "Step: 5320 loss: 0.038301\n",
      "Step: 5330 loss: 0.0388066\n",
      "Step: 5340 loss: 0.0410826\n",
      "Step: 5350 loss: 0.0379581\n",
      "Step: 5360 loss: 0.0376396\n",
      "Step: 5370 loss: 0.0371451\n",
      "Step: 5380 loss: 0.0378995\n",
      "Step: 5390 loss: 0.0400047\n",
      "Step: 5400 loss: 0.0393353\n",
      "Step: 5410 loss: 0.0399644\n",
      "Step: 5420 loss: 0.0391706\n"
     ]
    }
   ],
   "source": [
    "# Training loop: feed shortest-path features for both directions\n",
    "# (entity1 path, entity2 path) one mini-batch at a time.\n",
    "num_epochs = 20\n",
    "for epoch in range(num_epochs):\n",
    "    for j in range(num_batches):\n",
    "        lo, hi = j * batch_size, (j + 1) * batch_size\n",
    "        feed_dict = {\n",
    "            path_length: [path1_len[lo:hi], path2_len[lo:hi]],\n",
    "            word_ids: [word_p1_ids[lo:hi], word_p2_ids[lo:hi]],\n",
    "            pos_ids: [pos_p1_ids[lo:hi], pos_p2_ids[lo:hi]],\n",
    "            dep_ids: [dep_p1_ids[lo:hi], dep_p2_ids[lo:hi]],\n",
    "            y: rel_ids[lo:hi]}\n",
    "        _, loss, step = sess.run([optimizer, total_loss, global_step], feed_dict)\n",
    "        if step % 10 == 0:\n",
    "            print(\"Step:\", step, \"loss:\", loss)\n",
    "        # Checkpoint every 1000 global steps.\n",
    "        if step % 1000 == 0:\n",
    "            saver.save(sess, model_dir + '/model')\n",
    "            print(\"Saved Model\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "training accuracy 100.0\n"
     ]
    }
   ],
   "source": [
    "# Training-set accuracy: run inference over all training batches and\n",
    "# compare the predicted relation ids with the gold labels.\n",
    "all_predictions = []\n",
    "for j in range(num_batches):\n",
    "    lo, hi = j * batch_size, (j + 1) * batch_size\n",
    "    feed_dict = {\n",
    "        path_length: [path1_len[lo:hi], path2_len[lo:hi]],\n",
    "        word_ids: [word_p1_ids[lo:hi], word_p2_ids[lo:hi]],\n",
    "        pos_ids: [pos_p1_ids[lo:hi], pos_p2_ids[lo:hi]],\n",
    "        dep_ids: [dep_p1_ids[lo:hi], dep_p2_ids[lo:hi]],\n",
    "        y: rel_ids[lo:hi]}\n",
    "    all_predictions.append(sess.run(predictions, feed_dict))\n",
    "\n",
    "# Flatten the per-batch prediction arrays into a single flat list.\n",
    "y_pred = []\n",
    "for batch in all_predictions:\n",
    "    y_pred.extend(batch)\n",
    "\n",
    "n_examples = batch_size * num_batches\n",
    "count = 0\n",
    "for idx in range(n_examples):\n",
    "    count += y_pred[idx] == rel_ids[idx]\n",
    "accuracy = count / n_examples * 100\n",
    "\n",
    "print(\"training accuracy\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Build the TEST-split feature matrices, mirroring the training setup.\n",
    "# NOTE(review): pickle.load can execute arbitrary code for untrusted\n",
    "# files; acceptable here only because test_paths is a local artifact.\n",
    "f = open(data_dir + '/test_paths', 'rb')\n",
    "word_p1, word_p2, dep_p1, dep_p2, pos_p1, pos_p2 = pickle.load(f)\n",
    "f.close()\n",
    "\n",
    "# First whitespace-separated token on each line is the relation label.\n",
    "relations = []\n",
    "for line in open(data_dir + '/test_relations.txt'):\n",
    "    relations.append(line.strip().split()[0])\n",
    "\n",
    "length = len(word_p1)\n",
    "# NOTE: integer division drops the final length % batch_size examples.\n",
    "num_batches = int(length/batch_size)\n",
    "\n",
    "# Normalize tokens: lowercase words; out-of-vocabulary words and unknown\n",
    "# dependency labels fall back to unknown_token / 'OTH'.\n",
    "for i in range(length):\n",
    "    for j, word in enumerate(word_p1[i]):\n",
    "        word = word.lower()\n",
    "        word_p1[i][j] = word if word in word2id else unknown_token\n",
    "    for k, word in enumerate(word_p2[i]):\n",
    "        word = word.lower()\n",
    "        word_p2[i][k] = word if word in word2id else unknown_token\n",
    "    for l, d in enumerate(dep_p1[i]):\n",
    "        dep_p1[i][l] = d if d in dep2id else 'OTH'\n",
    "    for m, d in enumerate(dep_p2[i]):\n",
    "        dep_p2[i][m] = d if d in dep2id else 'OTH'\n",
    "\n",
    "# Id matrices padded with 1s out to max_len_path.\n",
    "# NOTE(review): assumes every path has length <= max_len_path; a longer\n",
    "# path would index past the matrix width -- confirm upstream truncation.\n",
    "word_p1_ids = np.ones([length, max_len_path], dtype=int)\n",
    "word_p2_ids = np.ones([length, max_len_path], dtype=int)\n",
    "pos_p1_ids = np.ones([length, max_len_path], dtype=int)\n",
    "pos_p2_ids = np.ones([length, max_len_path], dtype=int)\n",
    "dep_p1_ids = np.ones([length, max_len_path], dtype=int)\n",
    "dep_p2_ids = np.ones([length, max_len_path], dtype=int)\n",
    "rel_ids = np.array([rel2id[rel] for rel in relations])\n",
    "path1_len = np.array([len(w) for w in word_p1], dtype=int)\n",
    "path2_len = np.array([len(w) for w in word_p2], dtype=int)  # fix: was missing dtype=int (inconsistent with path1_len)\n",
    "\n",
    "# Encode each path element to its integer id.\n",
    "for i in range(length):\n",
    "    for j, w in enumerate(word_p1[i]):\n",
    "        word_p1_ids[i][j] = word2id[w]\n",
    "    for j, w in enumerate(word_p2[i]):\n",
    "        word_p2_ids[i][j] = word2id[w]\n",
    "    for j, w in enumerate(pos_p1[i]):\n",
    "        pos_p1_ids[i][j] = pos_tag(w)\n",
    "    for j, w in enumerate(pos_p2[i]):\n",
    "        pos_p2_ids[i][j] = pos_tag(w)\n",
    "    for j, w in enumerate(dep_p1[i]):\n",
    "        dep_p1_ids[i][j] = dep2id[w]\n",
    "    for j, w in enumerate(dep_p2[i]):\n",
    "        dep_p2_ids[i][j] = dep2id[w]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test-set accuracy; mirrors the training-accuracy cell.\n",
    "# NOTE(review): the final length % batch_size test examples are dropped\n",
    "# by the batching, so this slightly under-samples the test set.\n",
    "all_predictions = []\n",
    "for j in range(num_batches):\n",
    "    lo, hi = j * batch_size, (j + 1) * batch_size\n",
    "    feed_dict = {\n",
    "        path_length: [path1_len[lo:hi], path2_len[lo:hi]],\n",
    "        word_ids: [word_p1_ids[lo:hi], word_p2_ids[lo:hi]],\n",
    "        pos_ids: [pos_p1_ids[lo:hi], pos_p2_ids[lo:hi]],\n",
    "        dep_ids: [dep_p1_ids[lo:hi], dep_p2_ids[lo:hi]],\n",
    "        y: rel_ids[lo:hi]}\n",
    "    all_predictions.append(sess.run(predictions, feed_dict))\n",
    "\n",
    "# Flatten per-batch prediction arrays into one flat list.\n",
    "y_pred = []\n",
    "for batch in all_predictions:\n",
    "    y_pred.extend(batch)\n",
    "\n",
    "n_examples = batch_size * num_batches\n",
    "count = 0\n",
    "for idx in range(n_examples):\n",
    "    count += y_pred[idx] == rel_ids[idx]\n",
    "accuracy = count / n_examples * 100\n",
    "\n",
    "print(\"test accuracy\", accuracy)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
