{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Notebook written by [Zhedong Zheng](https://github.com/zhedongzheng)\n",
    "\n",
    "![title](img/word2vec.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "from collections import Counter\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters and shared state for the skip-gram model.\n",
    "# (word2idx / idx2word / vocab_size are added later by preprocess_text.)\n",
    "PARAMS = {\n",
    "    'min_freq': 5,     # words must occur strictly more than this often to be kept\n",
    "    'skip_window': 5,  # max distance between centre word and a context word\n",
    "    'n_sampled': 100,  # negative samples for tf.nn.sampled_softmax_loss\n",
    "    'embed_dim': 200,  # embedding vector dimension\n",
    "    'sample_words': ['six', 'gold', 'japan', 'college'],  # probe words for nearest-neighbour check\n",
    "    'batch_size': 1000,\n",
    "    'n_epochs': 10,\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def preprocess_text(text):\n",
    "    \"\"\"Tokenise raw text and convert it into a list of word indices.\n",
    "\n",
    "    Side effects: fills PARAMS['word2idx'], PARAMS['idx2word'] and\n",
    "    PARAMS['vocab_size'].  Words occurring at most PARAMS['min_freq']\n",
    "    times are dropped; frequent words are subsampled by filter_high_freq.\n",
    "    \"\"\"\n",
    "    text = text.replace('\\n', ' ')\n",
    "    # raw string: '\\s' is an invalid escape sequence in a plain literal\n",
    "    text = re.sub(r'\\s+', ' ', text).strip().lower()\n",
    "\n",
    "    words = text.split()\n",
    "    word2freq = Counter(words)\n",
    "    # keep only words seen strictly more than min_freq times\n",
    "    words = [word for word in words if word2freq[word] > PARAMS['min_freq']]\n",
    "    print(\"Total words:\", len(words))\n",
    "\n",
    "    # NOTE(review): set iteration order is not stable across interpreter\n",
    "    # runs, so word indices differ from run to run of this notebook\n",
    "    _words = set(words)\n",
    "    PARAMS['word2idx'] = {c: i for i, c in enumerate(_words)}\n",
    "    PARAMS['idx2word'] = {i: c for i, c in enumerate(_words)}\n",
    "    PARAMS['vocab_size'] = len(PARAMS['idx2word'])\n",
    "    print('Vocabulary size:', PARAMS['vocab_size'])\n",
    "\n",
    "    indexed = [PARAMS['word2idx'][w] for w in words]\n",
    "    indexed = filter_high_freq(indexed)\n",
    "    print(\"Word preprocessing completed ...\")\n",
    "\n",
    "    return indexed\n",
    "\n",
    "def filter_high_freq(int_words, t=1e-5, threshold=0.8):\n",
    "    \"\"\"Subsample very frequent words (Mikolov-style).\n",
    "\n",
    "    A word with corpus frequency f gets drop probability 1 - sqrt(t / f);\n",
    "    every occurrence of a word whose drop probability reaches `threshold`\n",
    "    is removed from the stream.\n",
    "    \"\"\"\n",
    "    counts = Counter(int_words)\n",
    "    total = len(int_words)\n",
    "\n",
    "    drop_prob = {}\n",
    "    for word, count in counts.items():\n",
    "        freq = count / total\n",
    "        drop_prob[word] = 1 - np.sqrt(t / freq)\n",
    "\n",
    "    return [w for w in int_words if drop_prob[w] < threshold]\n",
    "\n",
    "def make_data(int_words):\n",
    "    \"\"\"Build parallel (centre word, context word) lists for skip-gram.\n",
    "\n",
    "    Each centre word is repeated once per context word returned by get_y,\n",
    "    so the two returned lists always have equal length.\n",
    "    \"\"\"\n",
    "    inputs, targets = [], []\n",
    "    for idx, centre in enumerate(int_words):\n",
    "        context = get_y(int_words, idx)\n",
    "        inputs.extend([centre] * len(context))\n",
    "        targets.extend(context)\n",
    "    return inputs, targets\n",
    "\n",
    "\n",
    "def get_y(words, idx):\n",
    "    \"\"\"Return the deduplicated context words around position idx.\n",
    "\n",
    "    The window size is redrawn uniformly from [1, PARAMS['skip_window']]\n",
    "    on every call, which implicitly favours nearby context words.\n",
    "    \"\"\"\n",
    "    window = np.random.randint(1, PARAMS['skip_window'] + 1)\n",
    "    start = max(idx - window, 0)\n",
    "    context = words[start: idx] + words[idx + 1: idx + window + 1]\n",
    "    return list(set(context))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_fn(features, labels, mode, params):\n",
    "    \"\"\"Estimator model_fn: skip-gram word2vec with sampled softmax.\n",
    "\n",
    "    TRAIN returns the sampled-softmax loss plus an Adam train op;\n",
    "    PREDICT returns each input word's cosine similarity against the\n",
    "    whole embedding table.  EVAL is not handled (falls through to None).\n",
    "    \"\"\"\n",
    "    # output-layer parameters for sampled softmax, and the input embedding table\n",
    "    W = tf.get_variable('softmax_W', [PARAMS['vocab_size'], PARAMS['embed_dim']])\n",
    "    b = tf.get_variable('softmax_b', [PARAMS['vocab_size']])\n",
    "    E = tf.get_variable('embedding', [PARAMS['vocab_size'], PARAMS['embed_dim']])\n",
    "    \n",
    "    embedded = tf.nn.embedding_lookup(E, features['x']) # forward activation\n",
    "    \n",
    "    if mode == tf.estimator.ModeKeys.TRAIN:\n",
    "        # sampled softmax avoids computing a full vocab_size softmax per step;\n",
    "        # labels arrive shaped [batch, 1] (the caller expands dims)\n",
    "        loss_op = tf.reduce_mean(tf.nn.sampled_softmax_loss(\n",
    "            weights = W,\n",
    "            biases = b,\n",
    "            labels = labels,\n",
    "            inputs = embedded,\n",
    "            num_sampled = PARAMS['n_sampled'],\n",
    "            num_classes = PARAMS['vocab_size']))\n",
    "\n",
    "        train_op = tf.train.AdamOptimizer().minimize(\n",
    "            loss_op, global_step=tf.train.get_global_step())\n",
    "        \n",
    "        return tf.estimator.EstimatorSpec(mode=mode, loss=loss_op, train_op=train_op)\n",
    "    \n",
    "    if mode == tf.estimator.ModeKeys.PREDICT:\n",
    "        # rows of E are L2-normalised, so the matmul below yields cosine\n",
    "        # similarity between each sample word and every vocabulary word\n",
    "        normalized_E = tf.nn.l2_normalize(E, -1)\n",
    "        sample_E = tf.nn.embedding_lookup(normalized_E, features['x'])\n",
    "        similarity = tf.matmul(sample_E, normalized_E, transpose_b=True)\n",
    "        \n",
    "        return tf.estimator.EstimatorSpec(mode, predictions=similarity)\n",
    "    \n",
    "\n",
    "def print_neighbours(similarity, top_k=5):\n",
    "    \"\"\"Print the top_k most-similar words for each configured sample word.\n",
    "\n",
    "    Index 0 of each sorted row is skipped: the vector most similar to a\n",
    "    word is the word itself.\n",
    "    \"\"\"\n",
    "    for word, row in zip(PARAMS['sample_words'], similarity):\n",
    "        nearest = (-row).argsort()[1:top_k + 1]\n",
    "        log = 'Nearest to [%s]:' % word\n",
    "        for neighbour_idx in nearest:\n",
    "            log = '%s %s,' % (log, PARAMS['idx2word'][neighbour_idx])\n",
    "        print(log)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total words: 885720\n",
      "Vocabulary size: 9582\n",
      "Word preprocessing completed ...\n",
      "INFO:tensorflow:Using default config.\n",
      "WARNING:tensorflow:Using temporary folder as model directory: /var/folders/sx/fv0r97j96fz8njp14dt5g7940000gn/T/tmpkhu995ue\n",
      "INFO:tensorflow:Using config: {'_model_dir': '/var/folders/sx/fv0r97j96fz8njp14dt5g7940000gn/T/tmpkhu995ue', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x121748d68>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n",
      "WARNING:tensorflow:Estimator's model_fn (<function model_fn at 0x121715e18>) includes params argument, but params are not passed to Estimator.\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Create CheckpointSaverHook.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "INFO:tensorflow:Saving checkpoints for 0 into /var/folders/sx/fv0r97j96fz8njp14dt5g7940000gn/T/tmpkhu995ue/model.ckpt.\n",
      "INFO:tensorflow:loss = 3.8326235, step = 1\n",
      "INFO:tensorflow:global_step/sec: 52.152\n",
      "INFO:tensorflow:loss = 3.5237663, step = 101 (1.919 sec)\n",
      "INFO:tensorflow:global_step/sec: 57.4445\n",
      "INFO:tensorflow:loss = 3.736324, step = 201 (1.741 sec)\n",
      "INFO:tensorflow:global_step/sec: 61.1107\n",
      "INFO:tensorflow:loss = 3.6907232, step = 301 (1.636 sec)\n",
      "INFO:tensorflow:global_step/sec: 65.4204\n",
      "INFO:tensorflow:loss = 3.6923845, step = 401 (1.529 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.0271\n",
      "INFO:tensorflow:loss = 3.7817593, step = 501 (1.492 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.3436\n",
      "INFO:tensorflow:loss = 3.4042459, step = 601 (1.485 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.848\n",
      "INFO:tensorflow:loss = 3.2055595, step = 701 (1.474 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.4894\n",
      "INFO:tensorflow:loss = 3.4861774, step = 801 (1.460 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7185\n",
      "INFO:tensorflow:loss = 3.852464, step = 901 (1.477 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7152\n",
      "INFO:tensorflow:loss = 3.27183, step = 1001 (1.477 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.4628\n",
      "INFO:tensorflow:loss = 3.1705825, step = 1101 (1.482 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.5237\n",
      "INFO:tensorflow:loss = 3.309683, step = 1201 (1.481 sec)\n",
      "INFO:tensorflow:global_step/sec: 66.66\n",
      "INFO:tensorflow:loss = 2.5463226, step = 1301 (1.500 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.5917\n",
      "INFO:tensorflow:loss = 3.3952959, step = 1401 (1.480 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.8379\n",
      "INFO:tensorflow:loss = 2.5858288, step = 1501 (1.474 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7877\n",
      "INFO:tensorflow:loss = 4.1801033, step = 1601 (1.475 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9552\n",
      "INFO:tensorflow:loss = 3.7573342, step = 1701 (1.472 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.3496\n",
      "INFO:tensorflow:loss = 3.8046432, step = 1801 (1.463 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.818\n",
      "INFO:tensorflow:loss = 3.0411692, step = 1901 (1.474 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.522\n",
      "INFO:tensorflow:loss = 3.4385188, step = 2001 (1.459 sec)\n",
      "INFO:tensorflow:global_step/sec: 69.119\n",
      "INFO:tensorflow:loss = 3.3135967, step = 2101 (1.447 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.8014\n",
      "INFO:tensorflow:loss = 3.1533237, step = 2201 (1.453 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.1812\n",
      "INFO:tensorflow:loss = 3.0474925, step = 2301 (1.488 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.5983\n",
      "INFO:tensorflow:loss = 3.6870258, step = 2401 (1.479 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.4479\n",
      "INFO:tensorflow:loss = 3.0402858, step = 2501 (1.483 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.8291\n",
      "INFO:tensorflow:loss = 3.5627744, step = 2601 (1.475 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.0991\n",
      "INFO:tensorflow:loss = 3.2046113, step = 2701 (1.468 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9965\n",
      "INFO:tensorflow:loss = 3.8066313, step = 2801 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.6251\n",
      "INFO:tensorflow:loss = 2.8772354, step = 2901 (1.479 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9921\n",
      "INFO:tensorflow:loss = 3.3384337, step = 3001 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.3345\n",
      "INFO:tensorflow:loss = 3.5551777, step = 3101 (1.485 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.5251\n",
      "INFO:tensorflow:loss = 3.0462096, step = 3201 (1.459 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.6736\n",
      "INFO:tensorflow:loss = 2.840562, step = 3301 (1.478 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9839\n",
      "INFO:tensorflow:loss = 2.6223457, step = 3401 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.1787\n",
      "INFO:tensorflow:loss = 3.4121203, step = 3501 (1.467 sec)\n",
      "INFO:tensorflow:global_step/sec: 69.2551\n",
      "INFO:tensorflow:loss = 3.196146, step = 3601 (1.444 sec)\n",
      "INFO:tensorflow:global_step/sec: 69.0603\n",
      "INFO:tensorflow:loss = 3.3870635, step = 3701 (1.448 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.3746\n",
      "INFO:tensorflow:loss = 3.102434, step = 3801 (1.484 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.0015\n",
      "INFO:tensorflow:loss = 2.6939332, step = 3901 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9594\n",
      "INFO:tensorflow:loss = 3.0394561, step = 4001 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9701\n",
      "INFO:tensorflow:loss = 2.9164503, step = 4101 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.4719\n",
      "INFO:tensorflow:loss = 3.2061157, step = 4201 (1.482 sec)\n",
      "INFO:tensorflow:global_step/sec: 66.8708\n",
      "INFO:tensorflow:loss = 2.0376818, step = 4301 (1.496 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.4033\n",
      "INFO:tensorflow:loss = 3.5225468, step = 4401 (1.462 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.1684\n",
      "INFO:tensorflow:loss = 3.5033386, step = 4501 (1.467 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9573\n",
      "INFO:tensorflow:loss = 2.3361926, step = 4601 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.2731\n",
      "INFO:tensorflow:loss = 2.5067017, step = 4701 (1.486 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.0972\n",
      "INFO:tensorflow:loss = 3.2109315, step = 4801 (1.491 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7195\n",
      "INFO:tensorflow:loss = 3.045063, step = 4901 (1.477 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.808\n",
      "INFO:tensorflow:loss = 3.213928, step = 5001 (1.475 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7766\n",
      "INFO:tensorflow:loss = 3.4073873, step = 5101 (1.476 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.4942\n",
      "INFO:tensorflow:loss = 2.7033536, step = 5201 (1.482 sec)\n",
      "INFO:tensorflow:global_step/sec: 66.4487\n",
      "INFO:tensorflow:loss = 2.6934423, step = 5301 (1.505 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9672\n",
      "INFO:tensorflow:loss = 2.9727929, step = 5401 (1.471 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.0662\n",
      "INFO:tensorflow:loss = 3.4013019, step = 5501 (1.469 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.3987\n",
      "INFO:tensorflow:loss = 2.4184458, step = 5601 (1.484 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.8938\n",
      "INFO:tensorflow:loss = 2.6207447, step = 5701 (1.473 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.8653\n",
      "INFO:tensorflow:loss = 2.4602451, step = 5801 (1.452 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7031\n",
      "INFO:tensorflow:loss = 2.929852, step = 5901 (1.477 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.4152\n",
      "INFO:tensorflow:loss = 2.683112, step = 6001 (1.462 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.2085\n",
      "INFO:tensorflow:loss = 2.7681563, step = 6101 (1.466 sec)\n",
      "INFO:tensorflow:global_step/sec: 52.8636\n",
      "INFO:tensorflow:loss = 3.3275795, step = 6201 (1.892 sec)\n",
      "INFO:tensorflow:global_step/sec: 54.0027\n",
      "INFO:tensorflow:loss = 3.1814613, step = 6301 (1.852 sec)\n",
      "INFO:tensorflow:global_step/sec: 58.7064\n",
      "INFO:tensorflow:loss = 2.755523, step = 6401 (1.703 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7964\n",
      "INFO:tensorflow:loss = 2.991853, step = 6501 (1.475 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.9456\n",
      "INFO:tensorflow:loss = 3.5853095, step = 6601 (1.472 sec)\n",
      "INFO:tensorflow:global_step/sec: 69.3044\n",
      "INFO:tensorflow:loss = 3.06126, step = 6701 (1.443 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.5897\n",
      "INFO:tensorflow:loss = 2.8720276, step = 6801 (1.480 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.3927\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:loss = 2.3783972, step = 6901 (1.462 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.2986\n",
      "INFO:tensorflow:loss = 3.0956774, step = 7001 (1.464 sec)\n",
      "INFO:tensorflow:global_step/sec: 69.2588\n",
      "INFO:tensorflow:loss = 2.9790354, step = 7101 (1.444 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.4006\n",
      "INFO:tensorflow:loss = 3.6135902, step = 7201 (1.462 sec)\n",
      "INFO:tensorflow:global_step/sec: 65.8242\n",
      "INFO:tensorflow:loss = 2.4605467, step = 7301 (1.520 sec)\n",
      "INFO:tensorflow:global_step/sec: 66.8466\n",
      "INFO:tensorflow:loss = 2.7080777, step = 7401 (1.496 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.4517\n",
      "INFO:tensorflow:loss = 2.3850899, step = 7501 (1.482 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.0477\n",
      "INFO:tensorflow:loss = 2.744026, step = 7601 (1.470 sec)\n",
      "INFO:tensorflow:global_step/sec: 68.1496\n",
      "INFO:tensorflow:loss = 2.5234501, step = 7701 (1.467 sec)\n",
      "INFO:tensorflow:global_step/sec: 67.7294\n",
      "INFO:tensorflow:loss = 3.2040606, step = 7801 (1.476 sec)\n",
      "INFO:tensorflow:Saving checkpoints for 7880 into /var/folders/sx/fv0r97j96fz8njp14dt5g7940000gn/T/tmpkhu995ue/model.ckpt.\n",
      "INFO:tensorflow:Loss for final step: 3.061059.\n",
      "INFO:tensorflow:Calling model_fn.\n",
      "INFO:tensorflow:Done calling model_fn.\n",
      "INFO:tensorflow:Graph was finalized.\n",
      "INFO:tensorflow:Restoring parameters from /var/folders/sx/fv0r97j96fz8njp14dt5g7940000gn/T/tmpkhu995ue/model.ckpt-7880\n",
      "INFO:tensorflow:Running local_init_op.\n",
      "INFO:tensorflow:Done running local_init_op.\n",
      "Nearest to [six]: markets, wo, facing, over, so,\n",
      "Nearest to [gold]: ounce, ounces, silver, platinum, bullion,\n",
      "Nearest to [japan]: sense, grim, advent, arrange, tackle,\n",
      "Nearest to [college]: basketball, football, colleges, freshman, parents,\n"
     ]
    }
   ],
   "source": [
    "# Read the PTB training text, build (centre, context) pairs and train\n",
    "# the skip-gram model with the tf.estimator API.\n",
    "with open('../temp/ptb_train.txt') as f:\n",
    "    x_train, y_train = make_data(preprocess_text(f.read()))\n",
    "\n",
    "estimator = tf.estimator.Estimator(model_fn)\n",
    "\n",
    "estimator.train(tf.estimator.inputs.numpy_input_fn(\n",
    "    x = {'x': np.array(x_train)},\n",
    "    y = np.expand_dims(y_train, -1),  # sampled_softmax_loss expects labels shaped [batch, 1]\n",
    "    batch_size = PARAMS['batch_size'],\n",
    "    num_epochs = PARAMS['n_epochs'],\n",
    "    shuffle = True))\n",
    "\n",
    "# predict() yields one similarity row (against the whole vocab) per sample word\n",
    "sim = np.array(list(estimator.predict(tf.estimator.inputs.numpy_input_fn(\n",
    "    x = {'x': np.array([PARAMS['word2idx'][w] for w in PARAMS['sample_words']])},\n",
    "    shuffle = False))))\n",
    "\n",
    "print_neighbours(sim)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
