{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "import collections\n",
     "import os\n",
     "import pickle\n",
     "import random\n",
     "import re\n",
     "import time\n",
     "\n",
     "import numpy as np\n",
     "import tensorflow as tf\n",
     "from sklearn.preprocessing import LabelEncoder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def clearstring(string):\n",
    "    string = re.sub('[^\\'\\\"A-Za-z0-9 ]+', '', string)\n",
    "    string = string.split(' ')\n",
    "    string = filter(None, string)\n",
    "    string = [y.strip() for y in string]\n",
    "    string = [y for y in string if len(y) > 3 and y.find('nbsp') < 0]\n",
    "    return ' '.join(string)\n",
    "\n",
    "def read_data():\n",
    "    list_folder = os.listdir('data/')\n",
    "    label = list_folder\n",
    "    label.sort()\n",
    "    outer_string, outer_label = [], []\n",
    "    for i in range(len(list_folder)):\n",
    "        list_file = os.listdir('data/' + list_folder[i])\n",
    "        strings = []\n",
    "        for x in range(len(list_file)):\n",
    "            with open('data/' + list_folder[i] + '/' + list_file[x], 'r') as fopen:\n",
    "                strings += fopen.read().split('\\n')\n",
    "        strings = list(filter(None, strings))\n",
    "        for k in range(len(strings)):\n",
    "            strings[k] = clearstring(strings[k])\n",
    "        labels = [i] * len(strings)\n",
    "        outer_string += strings\n",
    "        outer_label += labels\n",
    "    \n",
    "    dataset = np.array([outer_string, outer_label])\n",
    "    dataset = dataset.T\n",
    "    np.random.shuffle(dataset)\n",
    "    \n",
    "    string = []\n",
    "    for i in range(dataset.shape[0]):\n",
    "        string += dataset[i][0].split()\n",
    "    \n",
    "    return string, dataset, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def build_dataset(words, n_words):\n",
    "    count = [['UNK', -1]]\n",
    "    count.extend(collections.Counter(words).most_common(n_words - 1))\n",
    "    dictionary = dict()\n",
    "    for word, _ in count:\n",
    "        dictionary[word] = len(dictionary)\n",
    "    data = list()\n",
    "    unk_count = 0\n",
    "    for word in words:\n",
    "        index = dictionary.get(word, 0)\n",
    "        if index == 0:  # dictionary['UNK']\n",
    "            unk_count += 1\n",
    "        data.append(index)\n",
    "    count[0][1] = unk_count\n",
    "    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n",
    "    return data, count, dictionary, reversed_dictionary\n",
    "\n",
     "data_index = 0\n",
     "\n",
     "# Step 3: Function to generate a training batch for the skip-gram model.\n",
     "def generate_batch(batch_size, num_skips, skip_window):\n",
     "    \"\"\"Produce one skip-gram training batch from the global `data` id list.\n",
     "\n",
     "    Advances the module-level cursor `data_index`, so successive calls walk\n",
     "    through the corpus. Each center word is paired with `num_skips` context\n",
     "    words sampled without replacement from a window of `skip_window` words\n",
     "    on either side.\n",
     "\n",
     "    Returns:\n",
     "        batch: int32 array of shape (batch_size,) with center-word ids.\n",
     "        labels: int32 array of shape (batch_size, 1) with context-word ids.\n",
     "    \"\"\"\n",
     "    global data_index\n",
     "    global data\n",
     "    assert batch_size % num_skips == 0\n",
     "    assert num_skips <= 2 * skip_window\n",
     "    batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n",
     "    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n",
     "    span = 2 * skip_window + 1  # [ skip_window target skip_window ]\n",
     "    buffer = collections.deque(maxlen=span)\n",
     "    if data_index + span > len(data):\n",
     "        data_index = 0\n",
     "    buffer.extend(data[data_index:data_index + span])\n",
     "    data_index += span\n",
     "    for i in range(batch_size // num_skips):\n",
     "        # candidate context positions inside the window (center excluded)\n",
     "        context_words = [w for w in range(span) if w != skip_window]\n",
     "        words_to_use = random.sample(context_words, num_skips)\n",
     "        for j, context_word in enumerate(words_to_use):\n",
     "            batch[i * num_skips + j] = buffer[skip_window]\n",
     "            labels[i * num_skips + j, 0] = buffer[context_word]\n",
     "        if data_index == len(data):\n",
     "            # wrapped past the corpus end: restart the window at the front\n",
     "            for word in data[:span]:\n",
     "                buffer.append(word)\n",
     "            data_index = span\n",
     "        else:\n",
     "            # slide the window one word to the right\n",
     "            buffer.append(data[data_index])\n",
     "            data_index += 1\n",
     "    # Backtrack a little bit to avoid skipping words in the end of a batch\n",
     "    data_index = (data_index + len(data) - span) % len(data)\n",
     "    return batch, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "example 10 words: ['learning', 'experience', 'going', 'sleep', 'couple', 'hours', 'woke', 'afterwards', 'feeling', 'crappy']\n",
      "label dataset: ['anger', 'fear', 'joy', 'love', 'sadness', 'surprise']\n",
      "size corpus: 4433712\n",
      "size of unique words: 71554\n"
     ]
    }
   ],
   "source": [
     "vocabulary, dataset, label = read_data()\n",
     "print('example 10 words:',vocabulary[:10])\n",
     "print('label dataset:',label)\n",
     "print('size corpus:',len(vocabulary))\n",
     "vocabulary_size = len(list(set(vocabulary)))\n",
     "print('size of unique words:',vocabulary_size)\n",
     "# hyperparameters for the skip-gram embedding\n",
     "dimension = 300  # embedding vector size\n",
     "skip_window = 1  # words considered left and right of the center word\n",
     "num_skips = 2  # context words sampled per center word\n",
     "iteration_train_vectors = 100  # NOTE(review): not referenced by later cells -- confirm\n",
     "batch_size = 64\n",
     "location = os.getcwd()  # NOTE(review): not referenced by later cells -- confirm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Most common words (+UNK) [['UNK', 1], ('feel', 289939), ('feeling', 134185), ('that', 130733), ('like', 73972)]\n",
      "Sample data [752, 261, 41, 232, 401, 323, 287, 1356, 2, 487] ['learning', 'experience', 'going', 'sleep', 'couple', 'hours', 'woke', 'afterwards', 'feeling', 'crappy']\n"
     ]
    }
   ],
   "source": [
    "data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,vocabulary_size)\n",
    "del vocabulary  # Hint to reduce memory.\n",
    "print('Most common words (+UNK)', count[:5])\n",
    "print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "valid_size = 16  # number of validation words to track during training\n",
     "valid_window = 100  # draw them from the 100 most frequent word ids\n",
     "# unseeded draw: the validation word set differs between runs\n",
     "valid_examples = np.random.choice(valid_window, valid_size, replace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "graph = tf.Graph()\n",
    "\n",
    "with graph.as_default():\n",
    "\n",
    "    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n",
    "    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n",
    "    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n",
    "\n",
    "    # Ops and variables pinned to the CPU because of missing GPU implementation\n",
    "    with tf.device('/cpu:0'):\n",
    "        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, dimension], -1.0, 1.0))\n",
    "        embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n",
    "        nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, dimension],\n",
    "                                                      stddev=1.0 / np.sqrt(dimension)))\n",
    "        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "        loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights,\n",
    "                                             biases=nce_biases,\n",
    "                                             labels=train_labels,\n",
    "                                             inputs=embed,\n",
    "                                             num_sampled=batch_size / 2,\n",
    "                                             num_classes=vocabulary_size))\n",
    "        optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)\n",
    "\n",
    "        # Compute the cosine similarity between minibatch examples and all embeddings.\n",
    "        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n",
    "        normalized_embeddings = embeddings / norm\n",
    "        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n",
    "        similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)\n",
    "        init = tf.global_variables_initializer()\n",
    "\n",
    "num_steps = 100000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Average loss at step  0 :  158.17980957\n",
      "Nearest to will: trapted, adhered, ouran, reader, shockingly, becausei, upturn, bedingfield,\n",
      "Nearest to where: sheen, praxos, propriety, waysms, flab, homophobic, mysticism, googles,\n",
      "Nearest to sure: helloooo, tipex, lipped, pastels, hourly, turbo, seriouslyguys, sabahy,\n",
      "Nearest to felt: lexashmexa, olga, squid, schism, manzarek, shepards, mase, skdd,\n",
      "Nearest to again: fringes, hepburn, upright, leftover, assimilation, magically, innocence, vultures,\n",
      "Nearest to feel: sooooooooooo, bondi, predicable, anshika, gabaldon, lionelmessi, dueto, couture,\n",
      "Nearest to going: defiantly, michelin, nestle, offhand, dursleys, wildfell, engelbart, mengikuti,\n",
      "Nearest to that: shambling, preznit, deter, improper, someobdy, yorkshireman, etting, freehold,\n",
      "Nearest to after: clumsiness, backpackers, avowal, chilis, robles, attends, ispired, eraserhead,\n",
      "Nearest to this: yadda, hiked, proms, tresemme, joor, thembalitsha, obsticles, motorized,\n",
      "Nearest to which: signaling, enmayi, adaptor, detract, endeavours, bolywoodlatest, djing, pathan,\n",
      "Nearest to having: hannigan, dayquill, katies, aligncenter, cornerstones, backbreaking, nocturnal, beleiver,\n",
      "Nearest to because: trendingfever, miyan, inaccurate, tsuen, separated, becausse, accumulative, tryst,\n",
      "Nearest to from: downtown, plausible, asdfghjklqwertyuiop, disturb, roadblocks, swept, restraint, width,\n",
      "Nearest to made: connect, acquiesce, montmartre, ffviii, gallon, firming, fallout, unvalidated,\n",
      "Nearest to some: halsted, roxxy, femmes, becuz, kreamy, demba, shnookie, remarcably,\n",
      "Average loss at step  2000 :  88.4635332489\n",
      "Average loss at step  4000 :  54.9233110797\n",
      "Average loss at step  6000 :  42.8843253169\n",
      "Average loss at step  8000 :  34.8488111379\n",
      "Average loss at step  10000 :  29.6707653172\n",
      "Nearest to will: that, when, feeling, this, feel, have, would, with,\n",
      "Nearest to where: like, that, have, with, what, hurt, distracted, this,\n",
      "Nearest to sure: that, helloooo, feel, vain, write, later, ambitious, play,\n",
      "Nearest to felt: feeling, tomorrow, tummy, feel, dude, pens, whom, sense,\n",
      "Nearest to again: that, this, feel, innocence, when, lately, like, innocent,\n",
      "Nearest to feel: feeling, that, when, have, about, like, because, this,\n",
      "Nearest to going: pens, with, just, have, that, this, feel, because,\n",
      "Nearest to that: this, like, have, because, feel, about, when, feeling,\n",
      "Nearest to after: that, this, when, feeling, about, towards, feel, ventured,\n",
      "Nearest to this: that, feel, just, about, when, have, feeling, with,\n",
      "Nearest to which: that, denial, because, feel, come, just, feeling, when,\n",
      "Nearest to having: have, know, this, that, parent, privacy, early, president,\n",
      "Nearest to because: that, like, feel, feeling, about, with, when, have,\n",
      "Nearest to from: with, when, like, about, feeling, that, feel, because,\n",
      "Nearest to made: connect, unwanted, like, lose, choice, acquiesce, process, tear,\n",
      "Nearest to some: feel, that, this, answer, quiet, living, fearless, norman,\n",
      "Average loss at step  12000 :  25.0688477097\n",
      "Average loss at step  14000 :  22.0677209918\n",
      "Average loss at step  16000 :  19.9524238863\n",
      "Average loss at step  18000 :  17.5900986389\n",
      "Average loss at step  20000 :  16.0376581031\n",
      "Nearest to will: would, that, when, have, just, like, know, think,\n",
      "Nearest to where: that, this, with, when, what, have, like, world,\n",
      "Nearest to sure: know, helloooo, think, that, right, feel, have, like,\n",
      "Nearest to felt: feeling, feel, know, labels, tummy, because, tomorrow, more,\n",
      "Nearest to again: that, this, when, like, feel, just, because, feeling,\n",
      "Nearest to feel: feeling, just, really, because, that, have, when, like,\n",
      "Nearest to going: will, just, could, this, feeling, because, when, have,\n",
      "Nearest to that: just, because, when, this, have, what, like, really,\n",
      "Nearest to after: about, feeling, when, just, this, with, will, have,\n",
      "Nearest to this: just, that, when, feeling, feel, because, like, have,\n",
      "Nearest to which: that, because, just, feeling, like, about, when, feel,\n",
      "Nearest to having: have, because, that, know, feel, with, like, about,\n",
      "Nearest to because: that, just, when, like, about, know, really, feeling,\n",
      "Nearest to from: with, when, just, feeling, like, that, about, because,\n",
      "Nearest to made: that, like, acquiesce, connect, feeling, think, with, eager,\n",
      "Nearest to some: this, when, just, feel, people, that, have, these,\n",
      "Average loss at step  22000 :  14.775981323\n",
      "Average loss at step  24000 :  13.6673941532\n",
      "Average loss at step  26000 :  12.2628437511\n",
      "Average loss at step  28000 :  11.6312991129\n",
      "Average loss at step  30000 :  10.8328998178\n",
      "Nearest to will: would, want, could, when, going, because, just, have,\n",
      "Nearest to where: that, what, this, have, still, with, when, there,\n",
      "Nearest to sure: know, think, that, have, feel, helloooo, they, what,\n",
      "Nearest to felt: feel, feeling, been, still, this, really, have, because,\n",
      "Nearest to again: that, will, still, when, have, like, this, because,\n",
      "Nearest to feel: feeling, know, because, have, think, just, really, when,\n",
      "Nearest to going: will, because, could, feeling, would, want, this, what,\n",
      "Nearest to that: because, what, just, have, like, when, really, this,\n",
      "Nearest to after: about, when, with, will, feeling, this, from, because,\n",
      "Nearest to this: that, just, what, some, feel, because, still, feeling,\n",
      "Nearest to which: that, because, this, feel, have, like, what, just,\n",
      "Nearest to having: have, when, that, feeling, feel, this, because, with,\n",
      "Nearest to because: that, just, like, when, still, know, really, want,\n",
      "Nearest to from: with, when, just, will, because, have, like, into,\n",
      "Nearest to made: would, will, because, like, that, acquiesce, still, think,\n",
      "Nearest to some: this, just, when, that, will, feel, have, these,\n",
      "Average loss at step  32000 :  10.2426144308\n",
      "Average loss at step  34000 :  9.92490289843\n",
      "Average loss at step  36000 :  9.27350544703\n",
      "Average loss at step  38000 :  8.61187656212\n",
      "Average loss at step  40000 :  8.3941494416\n",
      "Nearest to will: would, want, could, going, should, just, that, feel,\n",
      "Nearest to where: that, this, still, what, when, there, with, because,\n",
      "Nearest to sure: know, think, feel, that, which, still, right, life,\n",
      "Nearest to felt: feel, feeling, really, still, just, this, because, have,\n",
      "Nearest to again: when, still, just, because, that, also, would, will,\n",
      "Nearest to feel: feeling, know, just, really, think, because, that, have,\n",
      "Nearest to going: will, this, could, want, because, just, would, still,\n",
      "Nearest to that: because, just, what, when, like, still, this, which,\n",
      "Nearest to after: when, about, feeling, because, with, this, just, know,\n",
      "Nearest to this: that, just, still, because, what, feel, when, which,\n",
      "Nearest to which: that, because, this, feel, when, just, what, have,\n",
      "Nearest to having: have, that, when, because, feeling, with, being, just,\n",
      "Nearest to because: that, just, still, when, like, know, really, want,\n",
      "Nearest to from: with, when, into, because, just, still, have, some,\n",
      "Nearest to made: would, will, always, make, that, because, like, still,\n",
      "Nearest to some: this, these, that, just, when, have, something, many,\n",
      "Average loss at step  42000 :  8.21246660602\n",
      "Average loss at step  44000 :  7.71552346885\n",
      "Average loss at step  46000 :  7.67670848131\n",
      "Average loss at step  48000 :  7.24560598433\n",
      "Average loss at step  50000 :  7.22249356949\n",
      "Nearest to will: would, could, want, going, because, should, that, need,\n",
      "Nearest to where: that, this, still, what, because, when, going, with,\n",
      "Nearest to sure: know, think, really, which, would, because, like, right,\n",
      "Nearest to felt: feel, feeling, really, because, have, been, just, still,\n",
      "Nearest to again: because, still, just, will, really, would, that, also,\n",
      "Nearest to feel: feeling, because, have, think, feels, really, know, that,\n",
      "Nearest to going: will, would, just, because, this, could, want, when,\n",
      "Nearest to that: because, just, still, which, when, have, really, what,\n",
      "Nearest to after: when, because, just, this, feeling, about, with, think,\n",
      "Nearest to this: just, that, still, life, which, really, because, would,\n",
      "Nearest to which: that, because, this, think, have, feel, still, always,\n",
      "Nearest to having: have, being, because, that, when, feeling, this, just,\n",
      "Nearest to because: that, just, when, still, know, think, really, like,\n",
      "Nearest to from: with, still, because, about, when, that, into, back,\n",
      "Nearest to made: make, would, will, could, always, like, makes, because,\n",
      "Nearest to some: these, that, just, those, many, this, other, something,\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average loss at step  52000 :  6.89679118335\n",
      "Average loss at step  54000 :  6.72881363487\n",
      "Average loss at step  56000 :  6.6662105571\n",
      "Average loss at step  58000 :  6.63735725844\n",
      "Average loss at step  60000 :  6.34787191606\n",
      "Nearest to will: would, could, want, should, going, need, that, when,\n",
      "Nearest to where: that, this, what, still, because, when, there, which,\n",
      "Nearest to sure: know, think, always, right, feel, still, really, would,\n",
      "Nearest to felt: feel, feeling, have, because, this, would, just, really,\n",
      "Nearest to again: still, will, then, also, could, would, because, just,\n",
      "Nearest to feel: feeling, think, know, feels, have, just, really, felt,\n",
      "Nearest to going: will, would, could, want, feeling, this, need, should,\n",
      "Nearest to that: because, just, still, which, when, also, what, have,\n",
      "Nearest to after: when, just, before, from, about, time, this, because,\n",
      "Nearest to this: that, just, feel, which, think, because, still, life,\n",
      "Nearest to which: that, because, just, still, think, always, this, feel,\n",
      "Nearest to having: have, being, feeling, when, because, that, think, getting,\n",
      "Nearest to because: that, think, just, still, which, when, like, really,\n",
      "Nearest to from: with, when, still, into, that, back, feeling, because,\n",
      "Nearest to made: make, could, would, will, makes, always, still, like,\n",
      "Nearest to some: these, those, something, many, feel, this, that, their,\n",
      "Average loss at step  62000 :  6.30640064931\n",
      "Average loss at step  64000 :  6.20332952845\n",
      "Average loss at step  66000 :  6.08739431679\n",
      "Average loss at step  68000 :  6.05798338103\n",
      "Average loss at step  70000 :  5.9190246675\n",
      "Nearest to will: would, could, should, want, need, going, still, them,\n",
      "Nearest to where: what, that, because, when, life, still, like, with,\n",
      "Nearest to sure: know, think, that, they, right, going, feel, really,\n",
      "Nearest to felt: feel, feeling, would, feels, really, still, know, have,\n",
      "Nearest to again: still, then, also, just, when, right, would, over,\n",
      "Nearest to feel: feeling, think, felt, really, feels, know, have, which,\n",
      "Nearest to going: will, would, could, just, right, feeling, should, want,\n",
      "Nearest to that: because, still, which, when, also, just, then, like,\n",
      "Nearest to after: when, about, feeling, still, with, this, before, time,\n",
      "Nearest to this: just, only, still, which, that, also, some, today,\n",
      "Nearest to which: that, because, this, still, also, feel, always, just,\n",
      "Nearest to having: have, being, feeling, because, getting, this, with, when,\n",
      "Nearest to because: that, still, when, just, think, like, really, know,\n",
      "Nearest to from: with, into, over, some, still, because, about, when,\n",
      "Nearest to made: make, still, makes, would, will, could, always, like,\n",
      "Nearest to some: these, many, those, other, this, just, their, something,\n",
      "Average loss at step  72000 :  5.84305190337\n",
      "Average loss at step  74000 :  5.84003363895\n",
      "Average loss at step  76000 :  5.70427984643\n",
      "Average loss at step  78000 :  5.73084589779\n",
      "Average loss at step  80000 :  5.63087281656\n",
      "Nearest to will: would, could, should, going, need, want, still, that,\n",
      "Nearest to where: what, that, still, because, life, which, think, when,\n",
      "Nearest to sure: know, think, they, right, only, going, always, really,\n",
      "Nearest to felt: feel, feeling, would, feels, know, really, still, think,\n",
      "Nearest to again: still, then, also, just, that, today, could, would,\n",
      "Nearest to feel: feeling, think, feels, really, know, felt, that, have,\n",
      "Nearest to going: would, could, will, need, just, right, want, still,\n",
      "Nearest to that: because, also, still, which, when, think, just, what,\n",
      "Nearest to after: when, before, because, still, love, while, just, with,\n",
      "Nearest to this: still, just, also, only, that, which, today, always,\n",
      "Nearest to which: that, because, this, still, think, also, then, only,\n",
      "Nearest to having: have, being, because, getting, this, feeling, that, think,\n",
      "Nearest to because: that, think, just, also, know, like, still, really,\n",
      "Nearest to from: with, into, when, still, back, love, about, after,\n",
      "Nearest to made: make, makes, would, could, still, always, will, didnt,\n",
      "Nearest to some: these, many, those, this, something, such, that, their,\n",
      "Average loss at step  82000 :  5.58116313028\n",
      "Average loss at step  84000 :  5.56175005555\n",
      "Average loss at step  86000 :  5.55855914819\n",
      "Average loss at step  88000 :  5.54672958326\n",
      "Average loss at step  90000 :  5.52160987794\n",
      "Nearest to will: would, should, could, want, need, going, didnt, just,\n",
      "Nearest to where: that, still, what, here, because, when, also, which,\n",
      "Nearest to sure: know, think, still, going, right, really, only, good,\n",
      "Nearest to felt: feel, feeling, would, know, feels, there, still, said,\n",
      "Nearest to again: still, then, when, also, just, today, always, really,\n",
      "Nearest to feel: feeling, think, feels, just, because, really, know, felt,\n",
      "Nearest to going: will, would, just, could, doing, want, getting, feeling,\n",
      "Nearest to that: because, still, also, which, really, when, what, just,\n",
      "Nearest to after: when, just, about, before, while, feel, with, feeling,\n",
      "Nearest to this: still, just, also, which, only, today, some, that,\n",
      "Nearest to which: that, because, still, also, this, just, feel, always,\n",
      "Nearest to having: have, being, feeling, getting, think, feel, this, because,\n",
      "Nearest to because: just, that, think, also, still, when, know, like,\n",
      "Nearest to from: with, into, after, back, love, through, when, will,\n",
      "Nearest to made: make, makes, could, would, making, didnt, always, still,\n",
      "Nearest to some: these, those, many, this, something, still, other, also,\n",
      "Average loss at step  92000 :  5.32551044202\n",
      "Average loss at step  94000 :  5.476118325\n",
      "Average loss at step  96000 :  5.39778642607\n",
      "Average loss at step  98000 :  5.38532819176\n"
     ]
    }
   ],
   "source": [
     "with tf.Session(graph=graph) as session:\n",
     "    init.run()\n",
     "    print('Initialized')\n",
     "\n",
     "    average_loss = 0\n",
     "    for step in range(num_steps):\n",
     "        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)\n",
     "        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}\n",
     "\n",
     "        # one SGD step; accumulate the batch loss for the running average\n",
     "        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n",
     "        average_loss += loss_val\n",
     "\n",
     "        if step % 2000 == 0:\n",
     "            if step > 0:\n",
     "                average_loss /= 2000\n",
     "            # at step 0 this prints a single-batch loss, not an average\n",
     "            print('Average loss at step ', step, ': ', average_loss)\n",
     "            average_loss = 0\n",
     "\n",
     "        if step % 10000 == 0:\n",
     "            # expensive: cosine similarity of the validation words vs. the full vocabulary\n",
     "            sim = similarity.eval()\n",
     "            for i in range(valid_size):\n",
     "                valid_word = reverse_dictionary[valid_examples[i]]\n",
     "                top_k = 8  # number of nearest neighbors\n",
     "                nearest = (-sim[i, :]).argsort()[1:top_k + 1]  # skip index 0: the word itself\n",
     "                log_str = 'Nearest to %s:' % valid_word\n",
     "                for k in range(top_k):\n",
     "                    close_word = reverse_dictionary[nearest[k]]\n",
     "                    log_str = '%s %s,' % (log_str, close_word)\n",
     "                print(log_str)\n",
     "    # pull the raw (unnormalized) embedding matrix out of the session\n",
     "    embedding_vals = session.run(embeddings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('vector-emotion.p', 'wb') as fopen:\n",
    "    pickle.dump(embedding_vals, fopen)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
