{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.contrib import learn\n",
    "from sklearn.externals import joblib\n",
    "import data_helpers\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os\n",
    "from NPDLogger import NPDLogger"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "model_path = '/temp0/dev/models/word2vec_cnn/'\n",
    "training_data = '/temp0/dev/models/temp_file_videoscan_train_train.dat'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total 7920072\r\n",
      "-rw-r--r-- 1 aidev aidev    1150044 Feb  6 15:59 turgay_WM_input_records.dat\r\n",
      "-rw-r--r-- 1 aidev aidev  192726845 Feb  6 15:59 turgay_WM_reference_data.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev    1142326 Feb  7 22:34 WM_input_records.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev  191127145 Feb  8 15:15 WM_reference_data_X.dat\r\n",
      "drwxrwxr-x 2 aidev aidev       4096 Feb  8 15:15 word2vec_\r\n",
      "-rw-rw-r-- 1 aidev aidev  322820082 Feb  9 00:01 US_reference_data_X.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev   55000000 Feb  9 00:01 US_reference_data_Y.dat\r\n",
      "drwxrwxr-x 6 aidev aidev       4096 Feb 16 22:25 word2vec\r\n",
      "-rw-rw-r-- 1 aidev aidev        931 Feb 17 14:32 industry_shortname_lookup.dat\r\n",
      "drwxrwxr-x 2 aidev aidev       4096 Feb 24 01:08 word2vec_match\r\n",
      "drwxrwxr-x 4 aidev aidev       4096 Mar 15 17:44 cnn_text\r\n",
      "-rw-rw-r-- 1 aidev aidev 2789250379 Mar 17 12:31 temp_file5_macy.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev 4173529088 Mar 17 12:33 temp_file5_amazon.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev          0 Mar 18 15:23 temp_file5_macy_2_test.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev    1970773 Mar 18 15:23 temp_file5_amazon_2_train.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev   13977343 Mar 19 12:08 temp_file_videoscan_input.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev  312664281 Mar 19 12:08 temp_file_videoscan_train.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev    2493754 Mar 21 22:49 temp_file_videoscan_train_test.dat\r\n",
      "-rw-rw-r-- 1 aidev aidev   52241739 Mar 21 22:49 temp_file_videoscan_train_train.dat\r\n",
      "drwxrwxr-x 2 aidev aidev       4096 Mar 23 01:55 word2vec_cnn\r\n"
     ]
    }
   ],
   "source": [
    "!ls -lrt /temp0/dev/models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "#! ls  /temp0/dev/models/word2vec_cnn/\n",
    "def line_tokenize(Linelist=None):\n",
    "    \"\"\"Split each raw line into a list of tokens.\n",
    "\n",
    "    Returns a list of token lists, one per input line.\n",
    "    \"\"\"\n",
    "    return [tf.compat.as_str(line).split() for line in Linelist]\n",
    "\n",
    "\n",
    "def Word2index2d(data_2dlist=None, dictionary=None):\n",
    "    \"\"\"Map every token to its vocabulary index (0 = unknown/UNK).\n",
    "\n",
    "    data_2dlist: list of token lists (output of line_tokenize).\n",
    "    dictionary: word -> index mapping.\n",
    "    \"\"\"\n",
    "    # dict.get with a default replaces the original bare try/except, which\n",
    "    # silently swallowed *every* exception, not just missing keys.\n",
    "    return [[dictionary.get(word, 0) for word in row] for row in data_2dlist]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "dictionary = joblib.load(model_path + '/dictionary.pkl')\n",
    "embeddings = joblib.load(model_path + '/embeddings.pkl')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception AssertionError: AssertionError(\"Nesting violated for default stack of <type 'weakref'> objects\",) in <bound method InteractiveSession.__del__ of <tensorflow.python.client.session.InteractiveSession object at 0x2ba4990005d0>> ignored\n"
     ]
    }
   ],
   "source": [
    "ss = tf.InteractiveSession()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-0.14501953,  0.1529541 ,  0.32202148, -0.12744141,  0.17932129,\n",
       "        0.01644897, -0.09558105, -0.15942383,  0.07507324,  0.07745361,\n",
       "        0.07635498, -0.17663574,  0.20129395, -0.00812531,  0.10998535,\n",
       "       -0.04302979, -0.02590942,  0.14440918, -0.06890869, -0.08502197,\n",
       "        0.02037048, -0.00486755,  0.10266113, -0.02444458,  0.11676025,\n",
       "       -0.03344727, -0.07342529,  0.22045898,  0.10626221,  0.07501221,\n",
       "       -0.16479492, -0.02461243,  0.0602417 , -0.16491699, -0.10968018,\n",
       "       -0.03613281,  0.13085938,  0.00397873, -0.11035156,  0.15014648,\n",
       "        0.10290527,  0.03167725, -0.12103271, -0.16223145,  0.21826172,\n",
       "        0.11328125, -0.04904175, -0.12487793,  0.10699463,  0.03210449,\n",
       "       -0.1854248 ,  0.14331055,  0.0690918 , -0.04736328, -0.06939697,\n",
       "       -0.03120422, -0.22290039, -0.19018555,  0.15673828,  0.05728149,\n",
       "       -0.12976074, -0.02476501, -0.16821289, -0.20080566], dtype=float16)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ss.run(tf.cast(tf.nn.embedding_lookup(embeddings,dictionary[\"spiderm\"]),tf.float16))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "dictionary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "#dictionary\n",
    "max_document_length  = 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "vocab_process = learn.preprocessing.VocabularyProcessor(max_document_length =max_document_length)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "learn.preprocessing.VocabularyProcessor?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "x_text_test, x_text_train, y = data_helpers.load_data_and_labels(training_data)\n",
    "# Only the training-side text is needed below; free the rest immediately.\n",
    "del x_text_test, y\n",
    "\n",
    "words_ = [tf.compat.as_str(line).split() for line in x_text_train]\n",
    "words = [token for line_tokens in words_ for token in line_tokens]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 1,  2,  3, ...,  0,  0,  0],\n",
       "       [ 1, 17, 14, ...,  0,  0,  0],\n",
       "       [ 1,  2, 29, ...,  0,  0,  0],\n",
       "       ..., \n",
       "       [38, 39, 14, ..., 28,  0,  0],\n",
       "       [ 1, 17, 14, ...,  0,  0,  0],\n",
       "       [ 1, 17, 14, ...,  0,  0,  0]])"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.array(list(vocab_process.fit_transform(x_text_train)))\n",
    "#tf.nn.embedding_lookup(embeddings,dictionary[word])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "43712"
      ]
     },
     "execution_count": 74,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(dictionary)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-0.14276664,  0.13686794,  0.08431825,  0.04368779, -0.23479709,\n",
       "        0.12103961, -0.20136392, -0.01855483, -0.13536727, -0.0738584 ,\n",
       "        0.07063076,  0.21159935,  0.21945481, -0.00230686,  0.02664221,\n",
       "        0.02548772,  0.18124405,  0.11112645, -0.03073082,  0.2362041 ,\n",
       "       -0.17198046, -0.04571034, -0.35279286, -0.12136268,  0.15105487,\n",
       "       -0.02880125,  0.04943464,  0.05526092,  0.04890934,  0.0867354 ,\n",
       "        0.03599857, -0.05323701,  0.02578104,  0.04870079, -0.06361986,\n",
       "       -0.05145854, -0.14232683,  0.07273249, -0.02789169, -0.0128205 ,\n",
       "       -0.10631222,  0.39017111,  0.04733137,  0.05812557,  0.05407018,\n",
       "        0.07828428,  0.06952811, -0.04604926, -0.10090795, -0.05623287,\n",
       "       -0.11622163, -0.03263419,  0.09103311, -0.11683894,  0.07250867,\n",
       "       -0.07022893, -0.1188589 , -0.19986254, -0.02853341,  0.17888133,\n",
       "       -0.15916334, -0.03696275,  0.08363436, -0.01170939], dtype=float32)"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ss.run(tf.nn.embedding_lookup(embeddings,0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "43712"
      ]
     },
     "execution_count": 83,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(dictionary)\n",
    "#words"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'embeddings' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-a8d916645d87>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0membeddings\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m: name 'embeddings' is not defined"
     ]
    }
   ],
   "source": [
    "embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# One embedding row per token. Indexing the joblib-loaded (numpy)\n",
    "# embeddings matrix directly is equivalent to the original\n",
    "# ss.run(tf.nn.embedding_lookup(...)) per word, but avoids adding a new\n",
    "# graph op plus a session round-trip for every single word (the original\n",
    "# grew the graph unboundedly and made O(n) sess.run calls).\n",
    "# Unknown words still fall back to a 64-wide zero vector, as before.\n",
    "new_emb = [embeddings[dictionary[word]] if word in dictionary\n",
    "           else np.zeros(64) for word in words]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "56256"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(vocab_process.vocabulary_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Const:0' shape=(32,) dtype=float64>"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tf.constant(np.zeros(32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.],\n",
       "       [ 0.,  0.]])"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ss.run(tf.concat(0,(tf.constant(np.zeros(10).reshape(5,2)),np.zeros(10).reshape(5,2))))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "   INFO [03/23/2017 05:00:59 PM] [<ipython-input-65-f13c877cee84> MainThread] [<module>] [27]: **** Program START *****************\n",
      "INFO:/abinitio/dev/data/work/serial/ml_logs/temp_10000:**** Program START *****************\n"
     ]
    },
    {
     "ename": "ArgumentError",
     "evalue": "argument --input_filename_train: conflicting option string(s): --input_filename_train",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mArgumentError\u001b[0m                             Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-65-f13c877cee84>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m     27\u001b[0m \u001b[0mlog\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"**** Program START *****************\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     28\u001b[0m \u001b[1;31m# Data Parameters\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 29\u001b[1;33m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mflags\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDEFINE_string\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"input_filename_train\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"/temp0/dev/models/temp_file_videoscan_train_train.dat\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"Data source input training items acting as reference.\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     30\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mflags\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDEFINE_string\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"input_filename_test\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"/temp0/dev/models/temp_file_videoscan_train_test.dat\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"Data source input test items that will be matched to the reference embedded\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/site-packages/tensorflow/python/platform/flags.pyc\u001b[0m in \u001b[0;36mDEFINE_string\u001b[1;34m(flag_name, default_value, docstring)\u001b[0m\n\u001b[0;32m     71\u001b[0m     \u001b[0mdocstring\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mA\u001b[0m \u001b[0mhelpful\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0mexplaining\u001b[0m \u001b[0mthe\u001b[0m \u001b[0muse\u001b[0m \u001b[0mof\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mflag\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     72\u001b[0m   \"\"\"\n\u001b[1;32m---> 73\u001b[1;33m   \u001b[0m_define_helper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mflag_name\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdefault_value\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdocstring\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     74\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     75\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/site-packages/tensorflow/python/platform/flags.pyc\u001b[0m in \u001b[0;36m_define_helper\u001b[1;34m(flag_name, default_value, docstring, flagtype)\u001b[0m\n\u001b[0;32m     56\u001b[0m                               \u001b[0mdefault\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdefault_value\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     57\u001b[0m                               \u001b[0mhelp\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdocstring\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 58\u001b[1;33m                               type=flagtype)\n\u001b[0m\u001b[0;32m     59\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     60\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/argparse.pyc\u001b[0m in \u001b[0;36madd_argument\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1306\u001b[0m                 \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"length of metavar tuple does not match nargs\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1307\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1308\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1309\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1310\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0madd_argument_group\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/argparse.pyc\u001b[0m in \u001b[0;36m_add_action\u001b[1;34m(self, action)\u001b[0m\n\u001b[0;32m   1680\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1681\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moption_strings\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1682\u001b[1;33m             \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_optionals\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1683\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1684\u001b[0m             \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_positionals\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/argparse.pyc\u001b[0m in \u001b[0;36m_add_action\u001b[1;34m(self, action)\u001b[0m\n\u001b[0;32m   1507\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1508\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1509\u001b[1;33m         \u001b[0maction\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_ArgumentGroup\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1510\u001b[0m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_group_actions\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1511\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/argparse.pyc\u001b[0m in \u001b[0;36m_add_action\u001b[1;34m(self, action)\u001b[0m\n\u001b[0;32m   1320\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_add_action\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1321\u001b[0m         \u001b[1;31m# resolve any conflicts\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1322\u001b[1;33m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_check_conflict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1323\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1324\u001b[0m         \u001b[1;31m# add to actions list\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/argparse.pyc\u001b[0m in \u001b[0;36m_check_conflict\u001b[1;34m(self, action)\u001b[0m\n\u001b[0;32m   1458\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mconfl_optionals\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1459\u001b[0m             \u001b[0mconflict_handler\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_get_handler\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1460\u001b[1;33m             \u001b[0mconflict_handler\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mconfl_optionals\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1461\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1462\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_handle_conflict_error\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mconflicting_actions\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/argparse.pyc\u001b[0m in \u001b[0;36m_handle_conflict_error\u001b[1;34m(self, action, conflicting_actions)\u001b[0m\n\u001b[0;32m   1465\u001b[0m                                      \u001b[1;32mfor\u001b[0m \u001b[0moption_string\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1466\u001b[0m                                      in conflicting_actions])\n\u001b[1;32m-> 1467\u001b[1;33m         \u001b[1;32mraise\u001b[0m \u001b[0mArgumentError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maction\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmessage\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mconflict_string\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1468\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1469\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_handle_conflict_resolve\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maction\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mconflicting_actions\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mArgumentError\u001b[0m: argument --input_filename_train: conflicting option string(s): --input_filename_train"
     ]
    }
   ],
   "source": [
    "#! /usr/bin/env python\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os, sys\n",
    "import time\n",
    "import datetime\n",
    "import data_helpers\n",
    "from text_cnn import TextCNN\n",
    "from tensorflow.contrib import learn\n",
    "import csv\n",
    "from sklearn.externals import joblib\n",
    "from NPDLogger import NPDLogger\n",
    "\n",
    "from subprocess import Popen, PIPE\n",
    "#import os, sys, re,cx_Oracle, errno\n",
    "# Parameters\n",
    "# ==================================================\n",
    "#timetamp = str(int(sys.argv[1]))\n",
    "\n",
    "\n",
    "\n",
    "timetamp = '10000'\n",
    "log_severity_level = 'debug'\n",
    "log = NPDLogger(log_file_name='temp'+'_'+timetamp,log_severity_level = log_severity_level,console=True).log()\n",
    "log.info(\"**** Program START *****************\")\n",
    "# Data Parameters\n",
    "tf.flags.DEFINE_string(\"input_filename_train\", \"/temp0/dev/models/temp_file_videoscan_train_train.dat\", \"Data source input training items acting as reference.\")\n",
    "tf.flags.DEFINE_string(\"input_filename_test\", \"/temp0/dev/models/temp_file_videoscan_train_test.dat\", \"Data source input test items that will be matched to the reference embedded\")\n",
    "\n",
    "# Eval Parameters\n",
    "#tf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n",
    "tf.flags.DEFINE_string(\"checkpoint_dir\", \"/temp0/dev/models/word2vec_cnn/runs/\"+timetamp+\"/checkpoints\", \"Checkpoint directory from training run\")\n",
    "tf.flags.DEFINE_string(\"embed_input_dir\", \"/temp0/dev/models/word2vec_cnn/embed_input/\"+timetamp+\"/\", \"Save input intertms of embedding concat\")\n",
    "tf.flags.DEFINE_boolean(\"eval_train\", True, \"Evaluate on all training data\")\n",
    "\n",
    "# Misc Parameters\n",
    "tf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\n",
    "tf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n",
    "\n",
    "\n",
    "FLAGS = tf.flags.FLAGS\n",
    "FLAGS._parse_flags()\n",
    "body = \"\"\n",
    "print(\"\\nParameters:\")\n",
    "for attr, value in sorted(FLAGS.__flags.items()):\n",
    "\tprint(\"{}={}\".format(attr.upper(), value))\n",
    "\tbody = body + \"{}={}\".format(attr.upper(), value) + '\\n'\n",
    "\tlog.info(\"{}={}\".format(attr.upper(), value))\n",
    "print(\"\")\n",
    "if not os.path.exists(FLAGS.embed_input_dir):\n",
    "\tos.makedirs(FLAGS.embed_input_dir)\n",
    "subject = \"CNN_TEXT -\" + timetamp\n",
    "def send_mail(subject, body,email_recepients='anjani.sharma@npd.com'):\n",
    "    \"\"\"Email `body` with subject `subject` via the system `mail` command.\n",
    "\n",
    "    Fix: the original body mixed tabs and spaces for indentation, which is\n",
    "    fragile in Python 2 and a SyntaxError under Python 3; normalized to spaces.\n",
    "    \"\"\"\n",
    "    read_msg = Popen([\"echo\", body], stdout=PIPE)\n",
    "    mail = Popen([\"mail\", \"-s\", subject, email_recepients], stdin=read_msg.stdout, stdout=PIPE)\n",
    "    output = mail.communicate()[0]\n",
    "\n",
    "def get_x_y(input_filename,data_type, nrows=0):\n",
    "    \"\"\"Load a pipe-delimited file and return (x, y_itemid, y_subcategoryn).\n",
    "\n",
    "    input_filename: path to a '|'-delimited file with no header row.\n",
    "    data_type: 'train' (columns itemid|Y|X_train|X_test) or anything else\n",
    "               (columns itemid|X_test); selects which text column is vectorized.\n",
    "    nrows: 0 reads the whole file, otherwise only the first `nrows` rows.\n",
    "\n",
    "    x is the integer word-id matrix from the saved VocabularyProcessor;\n",
    "    both y values are the itemid column cast to int32 (see NOTE below).\n",
    "    \"\"\"\n",
    "#input_names=['poiid','itemid','posoutlet','subcategoryn','brand','X_train','X_test']\n",
    "    if data_type == 'train':\n",
    "        input_names=['itemid','Y','X_train','X_test']\n",
    "    else:\n",
    "        input_names=['itemid','X_test']\n",
    "    # error_bad_lines=0 silently drops malformed rows; quoting=3 (QUOTE_NONE)\n",
    "    # keeps literal quote characters inside the text fields.\n",
    "    if nrows ==0:\n",
    "        dt = pd.read_csv(input_filename,header=None,delimiter=\"|\",error_bad_lines=0,names=input_names,quoting=3)\n",
    "    else:\n",
    "        dt = pd.read_csv(input_filename,header=None,delimiter=\"|\",error_bad_lines=0,names=input_names,quoting=3,nrows = nrows)\n",
    "    dt = dt.fillna('')\n",
    "    if data_type == 'train':\n",
    "        x_text = [data_helpers.clean_str(sent) for sent in dt.X_train.tolist()]\n",
    "    else:\n",
    "        x_text = [data_helpers.clean_str(sent) for sent in dt.X_test.tolist()]\n",
    "\n",
    "    # Re-use the vocabulary saved next to the training checkpoints so word\n",
    "    # ids line up with the embedding matrix the model was trained with.\n",
    "    vocab_path = os.path.join(FLAGS.checkpoint_dir,\"..\", \"vocab\")\n",
    "    vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\n",
    "    x = np.array(list(vocab_processor.transform(x_text)))\n",
    "    y_itemid = dt['itemid'].astype(np.int32)\n",
    "    log.info(\"%s ItemId Distinct count: %d\" %(data_type,len(set(y_itemid))))\n",
    "    # NOTE(review): this also reads 'itemid' (no subcategory column is\n",
    "    # loaded above) -- presumably a deliberate placeholder; confirm.\n",
    "    y_subcategoryn = dt['itemid'].astype(np.int32)\n",
    "    #log.info(\"%s Subcategory Distinct count: %d\" %(data_type,len(set(y_subcategoryn))))\n",
    "    return (x,y_itemid,y_subcategoryn)\n",
    "\n",
    "def convert_from_embed_raw(x, pickle=False):\n",
    "    \"\"\"Embed word-id matrix `x` using the RAW word2vec embeddings.\n",
    "\n",
    "    Feeds the joblib-pickled embedding matrix in through a placeholder\n",
    "    (bypassing whatever embedding the checkpoint holds) and returns the\n",
    "    looked-up vectors flattened to (n_rows, seq_len * dim). When `pickle`\n",
    "    is True the result is also dumped under FLAGS.embed_input_dir.\n",
    "    \"\"\"\n",
    "    embedding = joblib.load('/temp0/dev/models/word2vec_cnn/embeddings.pkl')\n",
    "    vocab_size = embedding.shape[0]\n",
    "    embedding_size = embedding.shape[1]\n",
    "    checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n",
    "    graph = tf.Graph()\n",
    "    with graph.as_default():\n",
    "        sess = tf.Session()\n",
    "        with sess.as_default():\n",
    "            # Rebuild the trained graph so we can borrow its input tensor.\n",
    "            saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n",
    "            saver.restore(sess, checkpoint_file)\n",
    "            input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n",
    "            # Placeholder lets us substitute the raw (pre-training) embeddings.\n",
    "            embedding_W = tf.placeholder(tf.float32, [vocab_size, embedding_size])\n",
    "            embedded_chars = tf.nn.embedding_lookup(embedding_W, input_x)\n",
    "            all_predictions = sess.run(embedded_chars, {input_x: x,embedding_W: embedding})\n",
    "            print all_predictions.shape\n",
    "            # Flatten (rows, seq_len, dim) -> (rows, seq_len*dim).\n",
    "            all_predictions = all_predictions.reshape(all_predictions.shape[0],all_predictions.shape[1]*all_predictions.shape[2])\t\n",
    "            print all_predictions.shape\n",
    "            if pickle:\n",
    "                joblib.dump(all_predictions, FLAGS.embed_input_dir + 'embedding_raw.pkl')\n",
    "    return all_predictions\n",
    "\n",
    "\n",
    "def convert_from_embed(x, pickle=False):\n",
    "    \"\"\"Embed word-id matrix `x` with the embedding stored in the checkpoint.\n",
    "\n",
    "    Reads the 'embedding/W' variable restored from the training run (the\n",
    "    fine-tuned matrix, unlike convert_from_embed_raw) and returns the\n",
    "    looked-up vectors flattened to (n_rows, seq_len * dim). When `pickle`\n",
    "    is True the result is also dumped under FLAGS.embed_input_dir.\n",
    "    \"\"\"\n",
    "    checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n",
    "    graph = tf.Graph()\n",
    "    with graph.as_default():\n",
    "        sess = tf.Session()\n",
    "        with sess.as_default():\n",
    "            saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n",
    "            saver.restore(sess, checkpoint_file)\n",
    "            input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n",
    "            # Trained embedding matrix restored from the checkpoint.\n",
    "            embedding_W = graph.get_operation_by_name(\"embedding/W\").outputs[0]\n",
    "            embedded_chars = tf.nn.embedding_lookup(embedding_W, input_x)\n",
    "            all_predictions = sess.run(embedded_chars, {input_x: x})\n",
    "            print all_predictions.shape\n",
    "            # Flatten (rows, seq_len, dim) -> (rows, seq_len*dim).\n",
    "            all_predictions = all_predictions.reshape(all_predictions.shape[0],all_predictions.shape[1]*all_predictions.shape[2])\t\n",
    "            print all_predictions.shape\n",
    "            if pickle:\n",
    "                joblib.dump(all_predictions, FLAGS.embed_input_dir + 'embedding.pkl')\n",
    "    return all_predictions\n",
    "\n",
    "def convert_from_maxpool(x, pickle=False):\n",
    "    \"\"\"Run `x` through the CNN and return the post-maxpool dropout output.\n",
    "\n",
    "    dropout_keep_prob is fed as 1.0 so dropout passes activations through\n",
    "    unchanged at inference time. Dumps the activations under\n",
    "    FLAGS.embed_input_dir when `pickle` is True.\n",
    "    \"\"\"\n",
    "    checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n",
    "    graph = tf.Graph()\n",
    "    with graph.as_default():\n",
    "        sess = tf.Session()\n",
    "        with sess.as_default():\n",
    "            saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n",
    "            saver.restore(sess, checkpoint_file)\n",
    "            input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n",
    "            dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n",
    "            # Output of the dropout op applied to the max-pooled feature vector.\n",
    "            maxpool_drop = graph.get_operation_by_name(\"dropout/h_drop/mul_1\").outputs[0]\n",
    "            all_predictions = sess.run(maxpool_drop, {input_x: x,dropout_keep_prob: 1.0 })\n",
    "            print all_predictions.shape\n",
    "            if pickle:\n",
    "                joblib.dump(all_predictions, FLAGS.embed_input_dir + 'maxpool.pkl')\n",
    "    return all_predictions\n",
    "\n",
    "#  Load raw files\t\n",
    "log.info(\"Running get_x_y - test\")\n",
    "#x, y_itemid,y_subcategoryn = get_x_y(input_filename=FLAGS.input_filename_test,data_type = 'test')\n",
    "log.info(\"Running get_x_y - ref\")\n",
    "#x_train, y_train_itemid,y_train_subcategoryn = get_x_y(input_filename=FLAGS.input_filename_train, data_type = 'train')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([  38,   39,   14,   40,   14,   41, 5325,  167,  168,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,\n",
       "          0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0])"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(29400,)"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y_itemid.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(222900, 99)"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "input_names=['itemid','Y','X_test']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "input_names=['itemid','Y','X_train','X_test']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Load the pipe-delimited training file. error_bad_lines takes a boolean:\n",
    "# False (was the int 0) skips malformed rows with a warning instead of raising.\n",
    "# quoting=3 is csv.QUOTE_NONE, so embedded quotes are treated as literal text.\n",
    "dt = pd.read_csv(training_data, header=None, delimiter=\"|\", error_bad_lines=False, names=input_names, quoting=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    ACNVideoScan-Historical First Alert    LITTLE ...\n",
       "1    ACNVideoScan-Rovi   VIDEO BRAND MY BEST FRIEND...\n",
       "2    ACNVideoScan-Historical US    TIGERS HOMETOWN ...\n",
       "3    Amazon.com VIDEO MUSIC VIDEO & CONCERTS IMAGE ...\n",
       "4           ACNVideoScan-Rovi   VIDEO BRAND FAIRY IDOL\n",
       "5    ACNVideoScan-Historical First Alert    SNOWBER...\n",
       "6    ACNVideoScan-Historical First Alert    GIRLS O...\n",
       "7     ACNVideoScan-Rovi   VIDEO BRAND DESPERADO (1995)\n",
       "8    ACNVideoScan-Historical Canada    STARSHIP TRO...\n",
       "9    Amazon.com VIDEO MUSIC VIDEO & CONCERTS IMAGE ...\n",
       "Name: X_test, dtype: object"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dt.iloc[:10].X_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    ACNVideoScan-Historical First Alert    LITTLE ...\n",
       "1    ACNVideoScan-Rovi   VIDEO BRAND MY BEST FRIEND...\n",
       "2    ACNVideoScan-Historical US    TIGERS HOMETOWN ...\n",
       "3    Amazon.com VIDEO MUSIC VIDEO & CONCERTS IMAGE ...\n",
       "4    ACNVideoScan-Rovi   VIDEO BRAND FAIRY IDOL Chi...\n",
       "5    ACNVideoScan-Historical First Alert    SNOWBER...\n",
       "6    ACNVideoScan-Historical First Alert    GIRLS O...\n",
       "7    ACNVideoScan-Rovi   VIDEO BRAND DESPERADO (199...\n",
       "8    ACNVideoScan-Historical Canada    STARSHIP TRO...\n",
       "9    Amazon.com VIDEO MUSIC VIDEO & CONCERTS IMAGE ...\n",
       "Name: X_train, dtype: object"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dt.iloc[:10].X_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1, 99, 64)\n",
      "(1, 6336)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([[-0.13423242,  0.02163813,  0.31426671, ...,  0.11207271,\n",
       "        -0.11200023,  0.00673293]], dtype=float32)"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "convert_from_embed_raw([x_train[0]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "ename": "KeyError",
     "evalue": "\"The name 'embedding_1/W' refers to an Operation not in the graph.\"",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-53-aaea6fdb730b>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mconvert_from_embed\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mx_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m<ipython-input-34-2341b0d5e87b>\u001b[0m in \u001b[0;36mconvert_from_embed\u001b[1;34m(x, pickle)\u001b[0m\n\u001b[0;32m    112\u001b[0m             \u001b[0msaver\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrestore\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msess\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcheckpoint_file\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    113\u001b[0m             \u001b[0minput_x\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgraph\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_operation_by_name\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"input_x\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 114\u001b[1;33m             \u001b[0membedding_W\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgraph\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_operation_by_name\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"embedding_1/W\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    115\u001b[0m             \u001b[0membedded_chars\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0membedding_lookup\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0membedding_W\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput_x\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    116\u001b[0m             \u001b[0mall_predictions\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msess\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0membedded_chars\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m{\u001b[0m\u001b[0minput_x\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc\u001b[0m in \u001b[0;36mget_operation_by_name\u001b[1;34m(self, name)\u001b[0m\n\u001b[0;32m   2508\u001b[0m       raise TypeError(\"Operation names are strings (or similar), not %s.\"\n\u001b[0;32m   2509\u001b[0m                       % type(name).__name__)\n\u001b[1;32m-> 2510\u001b[1;33m     \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_graph_element\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mallow_tensor\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mallow_operation\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2511\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2512\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0mget_tensor_by_name\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc\u001b[0m in \u001b[0;36mas_graph_element\u001b[1;34m(self, obj, allow_tensor, allow_operation)\u001b[0m\n\u001b[0;32m   2383\u001b[0m     \"\"\"\n\u001b[0;32m   2384\u001b[0m     \u001b[1;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_lock\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2385\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_as_graph_element_locked\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mallow_tensor\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mallow_operation\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2386\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2387\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_as_graph_element_locked\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mallow_tensor\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mallow_operation\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m/home/aidev/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc\u001b[0m in \u001b[0;36m_as_graph_element_locked\u001b[1;34m(self, obj, allow_tensor, allow_operation)\u001b[0m\n\u001b[0;32m   2443\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mname\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_nodes_by_name\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2444\u001b[0m           raise KeyError(\"The name %s refers to an Operation not in the \"\n\u001b[1;32m-> 2445\u001b[1;33m                          \"graph.\" % repr(name))\n\u001b[0m\u001b[0;32m   2446\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_nodes_by_name\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2447\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: \"The name 'embedding_1/W' refers to an Operation not in the graph.\""
     ]
    }
   ],
   "source": [
    "#convert_from_embed([x_train[0]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def convert_from_embed(x, pickle=False):\n",
    "    # Look up the learned embedding vectors for integer token-id sequences using\n",
    "    # the latest checkpoint under FLAGS.checkpoint_dir, then flatten each sequence\n",
    "    # into a single row.\n",
    "    # NOTE(review): this duplicates an earlier definition in this notebook and\n",
    "    # silently shadows it -- keep only one copy.\n",
    "    # x: iterable of fixed-length token-id sequences fed to the input_x placeholder.\n",
    "    # pickle: when True, also dump the result to FLAGS.embed_input_dir + 'embedding.pkl'.\n",
    "    # Returns a 2-D numpy array of shape (len(x), seq_len * embed_dim).\n",
    "    checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n",
    "    graph = tf.Graph()\n",
    "    with graph.as_default():\n",
    "        # Use the session as a context manager so it is closed on exit\n",
    "        # (the previous version leaked the session).\n",
    "        with tf.Session() as sess:\n",
    "            # Rebuild the graph from the checkpoint's .meta file and restore weights.\n",
    "            saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n",
    "            saver.restore(sess, checkpoint_file)\n",
    "            input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n",
    "            embedding_W = graph.get_operation_by_name(\"embedding/W\").outputs[0]\n",
    "            embedded_chars = tf.nn.embedding_lookup(embedding_W, input_x)\n",
    "            all_predictions = sess.run(embedded_chars, {input_x: x})\n",
    "            print all_predictions.shape\n",
    "            # Flatten (batch, seq_len, embed_dim) -> (batch, seq_len * embed_dim).\n",
    "            all_predictions = all_predictions.reshape(all_predictions.shape[0], all_predictions.shape[1] * all_predictions.shape[2])\n",
    "            print all_predictions.shape\n",
    "            if pickle:\n",
    "                joblib.dump(all_predictions, FLAGS.embed_input_dir + 'embedding.pkl')\n",
    "    # Indentation fixed: the stored source had this return at column 0, which is\n",
    "    # a syntax error on a fresh kernel run.\n",
    "    return all_predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "global name 'FLAGS' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-64-aaea6fdb730b>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mconvert_from_embed\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mx_train\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m<ipython-input-63-3775f4903fc1>\u001b[0m in \u001b[0;36mconvert_from_embed\u001b[1;34m(x, pickle)\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mconvert_from_embed\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpickle\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m     \u001b[0mcheckpoint_file\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlatest_checkpoint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcheckpoint_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      3\u001b[0m     \u001b[0mgraph\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m     \u001b[1;32mwith\u001b[0m \u001b[0mgraph\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m         \u001b[0msess\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSession\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: global name 'FLAGS' is not defined"
     ]
    }
   ],
   "source": [
    "convert_from_embed([x_train[0]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
