{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n",
    "#\n",
    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "# you may not use this file except in compliance with the License.\n",
    "# You may obtain a copy of the License at\n",
    "#\n",
    "#     http://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License.\n",
    "# ==============================================================================\n",
    "\"\"\"Basic word2vec example.\"\"\"\n",
    "\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import collections\n",
    "import math\n",
    "import os\n",
    "import random\n",
    "from tempfile import gettempdir\n",
    "import zipfile\n",
    "import json\n",
    "\n",
    "import numpy as np\n",
    "from six.moves import urllib\n",
    "from six.moves import xrange  # pylint: disable=redefined-builtin\n",
    "import tensorflow as tf\n",
    "from pylab import mpl"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "filename = \"QuanSongCi.txt\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Read the data into a list of strings.\n",
    "def read_data(filename):\n",
    "    \"\"\"Extract the file as a list of words.\"\"\"\n",
    "    f = open(filename, 'r', encoding='UTF-8')\n",
    "    data = f.read()\n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Data size 1903073\n"
     ]
    }
   ],
   "source": [
     "# Load the raw corpus; its length is the number of characters\n",
     "# (Chinese text, so the model below is character-level).\n",
     "vocabulary = read_data(filename)\n",
     "print('Data size', len(vocabulary))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Split the corpus string into a list of single characters; each\n",
     "# character is treated as a \"word\" for the skip-gram model.\n",
     "vocabulary = list(vocabulary)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Most common words (+UNK) [['UNK', 1196], ('。', 149620), ('\\n', 117070), ('，', 108451), ('、', 19612)]\n",
      "Sample data [1503, 1828, 2, 2, 40, 613, 47, 9, 111, 117] ['潘', '阆', '\\n', '\\n', '酒', '泉', '子', '（', '十', '之']\n"
     ]
    }
   ],
   "source": [
     "# Step 2: Build the dictionary and replace rare words with UNK token.\n",
     "vocabulary_size = 5000\n",
     "\n",
     "def build_dataset(words, n_words):\n",
     "  \"\"\"Process raw inputs into a dataset.\n",
     "\n",
     "  Args:\n",
     "    words: iterable of tokens (single characters here).\n",
     "    n_words: vocabulary size; the n_words - 1 most frequent tokens keep\n",
     "      their own id, every other token maps to 'UNK' (id 0).\n",
     "\n",
     "  Returns:\n",
     "    data: list of integer ids, same length and order as `words`.\n",
     "    count: list of [token, frequency] pairs, most frequent first.\n",
     "    dictionary: token -> integer id.\n",
     "    reversed_dictionary: integer id -> token.\n",
     "  \"\"\"\n",
     "  count = [['UNK', -1]] # lets us look up each word's occurrence count\n",
     "  # Count token frequencies; keep the n_words - 1 most common in `count`.\n",
     "  count.extend(collections.Counter(words).most_common(n_words - 1))\n",
     "  dictionary = dict()\n",
     "  for word, _ in count:\n",
     "    dictionary[word] = len(dictionary) # map: word -> its index in count\n",
     "    \n",
     "  data = list()\n",
     "  unk_count = 0\n",
     "  # Encode the corpus and count how often 'UNK' occurs.\n",
     "  for word in words:\n",
     "    index = dictionary.get(word, 0)\n",
     "    if index == 0:  # dictionary['UNK']\n",
     "      unk_count += 1\n",
     "    data.append(index) # each word's index in the dictionary\n",
     "  count[0][1] = unk_count\n",
     "  # Invert the dictionary so an id can be mapped back to its word.\n",
     "  reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n",
     "    \n",
     "  return data, count, dictionary, reversed_dictionary\n",
     "\n",
     "# Filling 4 global variables:\n",
     "# data - list of codes (integers from 0 to vocabulary_size-1).\n",
     "#   This is the original text but words are replaced by their codes\n",
     "# count - map of words(strings) to count of occurrences\n",
     "# dictionary - map of words(strings) to their codes(integers)\n",
     "# reverse_dictionary - maps codes(integers) to words(strings)\n",
     "data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,\n",
     "                                                            vocabulary_size)\n",
     "# Persist both mappings so later inference code can reuse this vocabulary.\n",
     "with open('dictionary.json', 'w', encoding='utf-8') as fp:\n",
     "  json.dump(dictionary, fp, ensure_ascii=False)\n",
     "with open('reversed_dictionary.json', 'w', encoding='utf-8') as fp:\n",
     "  json.dump(reverse_dictionary, fp, ensure_ascii=False)\n",
     "    \n",
     "del vocabulary  # Hint to reduce memory.\n",
     "print('Most common words (+UNK)', count[:5])\n",
     "print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1828 阆 -> 1503 潘\n",
      "1828 阆 -> 2 \n",
      "\n",
      "2 \n",
      " -> 1828 阆\n",
      "2 \n",
      " -> 2 \n",
      "\n",
      "2 \n",
      " -> 40 酒\n",
      "2 \n",
      " -> 2 \n",
      "\n",
      "40 酒 -> 2 \n",
      "\n",
      "40 酒 -> 613 泉\n"
     ]
    }
   ],
   "source": [
    "data_index = 0\n",
    "# Step 3: Function to generate a training batch for the skip-gram model.\n",
    "# 提供一个生成训练批数据的函数\n",
    "# batch_size:每次训练取多少数据\n",
    "# num_skip:对于一个输入数据产生多少个标签数据\n",
    "# skip_window:确定取一个词周边多远的词来训练\n",
    "# batch和labels都是单词对应的词典索引\n",
    "# skip_windows决定上下文的长度,就是当前词的周围多少个词内的词被视为它的上下文的范围内，\n",
    "# 然后从这上下文范围内的词中随机取num_skips个与输入组合成num_skips组训练数据  \n",
    "def generate_batch(batch_size, num_skips, skip_window):\n",
    "  global data_index\n",
    "  assert batch_size % num_skips == 0\n",
    "  assert num_skips <= 2 * skip_window #保证num_skipes不会超过当前输入的上下文的词的总个数\n",
    "  batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n",
    "  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n",
    "  span = 2 * skip_window + 1  # [ skip_window target skip_window ]\n",
    "  #最大长度是span,后面如果数据超过这个长度,前面的会被挤掉,这样使得buffer里面永远是data_index周围span个数据\n",
    "  buffer = collections.deque(maxlen=span)\n",
    "\n",
    "  if data_index + span > len(data):\n",
    "    data_index = 0\n",
    "  buffer.extend(data[data_index:data_index + span])\n",
    "  data_index += span\n",
    "    \n",
    "  for i in range(batch_size // num_skips):\n",
    "    context_words = [w for w in range(span) if w != skip_window]\n",
    "    words_to_use = random.sample(context_words, num_skips)\n",
    "    for j, context_word in enumerate(words_to_use):\n",
    "      batch[i * num_skips + j] = buffer[skip_window]\n",
    "      labels[i * num_skips + j, 0] = buffer[context_word]\n",
    "    if data_index == len(data):\n",
    "      #buffer[:] = data[:span]\n",
    "      #buffer.extend(data[:span])\n",
    "      buffer = data[:span]\n",
    "      data_index = span\n",
    "    else:\n",
    "      buffer.append(data[data_index])\n",
    "      data_index += 1\n",
    "        \n",
    "  # Backtrack a little bit to avoid skipping words in the end of a batch\n",
    "  data_index = (data_index + len(data) - span) % len(data)\n",
    "  return batch, labels\n",
    "\n",
    "batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)\n",
    "for i in range(8):\n",
    "  print(batch[i], reverse_dictionary[batch[i]],\n",
    "        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])\n",
    "\n",
    "# Step 4: Build and train a skip-gram model.\n",
    "batch_size = 128    \n",
    "embedding_size = 128  # Dimension of the embedding vector.\n",
    "skip_window = 1       # How many words to consider left and right.\n",
    "num_skips = 2         # How many times to reuse an input to generate a label.\n",
    "num_sampled = 64      # Number of negative examples to sample.\n",
    "\n",
    "# We pick a random validation set to sample nearest neighbors. Here we limit the\n",
    "# validation samples to the words that have a low numeric ID, which by\n",
    "# construction are also the most frequent. These 3 variables are used only for\n",
    "# displaying model accuracy, they don't affect calculation.\n",
    "valid_size = 16     # Random set of words to evaluate similarity on.\n",
    "valid_window = 100  # Only pick dev samples in the head of the distribution.\n",
    "valid_examples = np.random.choice(valid_window, valid_size, replace=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-8-a8291ace2eb1>:40: calling reduce_sum (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "keep_dims is deprecated, use keepdims instead\n",
      "Initialized\n",
      "Average loss at step  0 :  207.0222930908203\n",
      "Nearest to 醉: 纽, 因, 详, 杵, 绿, 樱, 宿, 聂,\n",
      "Nearest to 无: 歃, 星, 6, 谊, 拍, 玷, 决, 萱,\n",
      "Nearest to 自: 汨, 叶, 闸, 漏, 诣, 粒, 熔, 茯,\n",
      "Nearest to （: 表, 曙, 臼, 忌, 没, 茎, 锐, 磨,\n",
      "Nearest to 天: , 豚, 塍, 躔, 滋, 架, 肖, 楸,\n",
      "Nearest to 相: 跋, 红, 淑, 穆, 党, 弈, 滤, 靓,\n",
      "Nearest to 仙: 钜, 翘, 网, 派, 甬, 毅, 浩, 攫,\n",
      "Nearest to 。: 孔, 驾, 蜃, 湓, 遏, ,, 沽, 徂,\n",
      "Nearest to 得: 焙, 秾, 涑, 势, 菟, 雅, 徊, 哲,\n",
      "Nearest to 好: 弇, 活, 冻, 裯, 羯, “, 总, 托,\n",
      "Nearest to 生: 诣, 孟, 为, 请, 鏖, 嶙, 晌, 场,\n",
      "Nearest to 此: 数, 欷, 案, 贩, 隐, 斯, 泾, 脏,\n",
      "Nearest to 空: 笳, 碾, 微, 簪, 方, 贼, 仓, 孤,\n",
      "Nearest to 梦: 雏, 纪, 缩, 箫, 鹧, 髓, 躇, 辚,\n",
      "Nearest to 楼: 如, 臭, 街, 垆, 耆, 珍, 塍, 莹,\n",
      "Nearest to 酒: 辞, 忙, 谶, 窭, 害, 樵, 箧, 枷,\n",
      "Average loss at step  2000 :  21.394089884519577\n",
      "Average loss at step  4000 :  5.305496330142021\n",
      "Average loss at step  6000 :  4.889249450564384\n",
      "Average loss at step  8000 :  4.669546113967895\n",
      "Average loss at step  10000 :  4.5962701553404335\n",
      "Nearest to 醉: 因,  , 樱, 宿, 尤, 晶, 详, 朦,\n",
      "Nearest to 无: 星, 窕, 功, 6, 羞, ，, 窈, 饭,\n",
      "Nearest to 自: 汨, 记,  , 漏, 叶, 瞋, 定, 性,\n",
      "Nearest to （: 表, 貌, 磨, 降, ，, 锐, 迩, 曙,\n",
      "Nearest to 天: 滋, 豚, 架, 弄, 菱, 尔, 躔, 寸,\n",
      "Nearest to 相: 淑, 算, 遣, 辞, 跋, 烬, 了, 醿,\n",
      "Nearest to 仙: 派, 寸, 翘, 浩, 网, 黏, 甬, 粹,\n",
      "Nearest to 。: ，, 、, 如, 内, 冤, 由, 充, 慢,\n",
      "Nearest to 得: 焙, 徊, 赖, 秾, 列, 势, 雅, 跹,\n",
      "Nearest to 好: 但, 总, 托, 株, 冻, 活, 藤, 挼,\n",
      "Nearest to 生:  , 为, 诣, 请, 场, 孟, 绽, 篷,\n",
      "Nearest to 此: 数,  , 瓯, 案, 这, 沁, 黍, 斯,\n",
      "Nearest to 空: 孤, 碾, 笳, 方, 微, 翔, 簪, 粱,\n",
      "Nearest to 梦: 雏, 鹧, 箫, 常, 髓, 纪, 歇, 来,\n",
      "Nearest to 楼: 垆, 街, 珍, 如, 仁, 淡, 耆, 每,\n",
      "Nearest to 酒: 忙, 辞, 脂, 律, 絮, 省, 据, 蕙,\n",
      "Average loss at step  12000 :  4.55029765856266\n",
      "Average loss at step  14000 :  4.454616240859032\n",
      "Average loss at step  16000 :  4.468457687735557\n",
      "Average loss at step  18000 :  4.5062953075170515\n",
      "Average loss at step  20000 :  4.4395638419389725\n",
      "Nearest to 醉: 因, 纽, 宿, 樱, 尤,  , 详, 氲,\n",
      "Nearest to 无: 窕, 功, 窈, 粼, 决, 炽, 筝, 维,\n",
      "Nearest to 自: 汨, 公,  , 记, 粒, 定, 瞋, 谈,\n",
      "Nearest to （: 表, ，, 貌, 、, 磨, 缩, 鞠, 锐,\n",
      "Nearest to 天: , 豚, 滋, 躔, 菱, 珊, 樯, 窈,\n",
      "Nearest to 相: 淑, 跋, 算, 遣, 辞, 烬, 弈, 了,\n",
      "Nearest to 仙: 派, 寸, 浩, 甬, 网, 欷, 黏, 毅,\n",
      "Nearest to 。: ，, 、, \n",
      ", 撤, 充, 辀, ）, 骎,\n",
      "Nearest to 得: 徊, 焙, 跹, 赖, 光, 秾, 赢, 列,\n",
      "Nearest to 好: 但, 总, 筛, 裯, 翥, 株, 托, 藤,\n",
      "Nearest to 生:  , 诣, 为, 篷, 鏖, 绽, 娄, 请,\n",
      "Nearest to 此: 这, 数,  , 斯, 沁, 寓, 衲, 瓯,\n",
      "Nearest to 空: 孤, 微, 碾, 方, 粱, 笳, 怅, 翔,\n",
      "Nearest to 梦: 雏, 鹧, 都, 髓, 箫, 常, 佛, 歇,\n",
      "Nearest to 楼: 垆, 珍, 仁, 街, 阁, 耆, 塍, 冬,\n",
      "Nearest to 酒: 忙, 辞, 脂, 律, 据, 谶, 贮, 窭,\n",
      "Average loss at step  22000 :  4.389729565382003\n",
      "Average loss at step  24000 :  4.344560622215271\n",
      "Average loss at step  26000 :  4.363477974176407\n",
      "Average loss at step  28000 :  4.405505554437637\n",
      "Average loss at step  30000 :  4.321439734697342\n",
      "Nearest to 醉: 因, 尤, 纽,  , 樱, 详, 氲, 宿,\n",
      "Nearest to 无: 粼, 何, 窕, 炽, 窈, 衷, 羞, 功,\n",
      "Nearest to 自: 汨, 谈,  , 公, 定, 眈, 记, 鼙,\n",
      "Nearest to （: 表, ，, 貌, 鞠, 缩, 、, 锐, ）,\n",
      "Nearest to 天: 豚, , 俯, 滋, 窈, 塍, 躔, 吴,\n",
      "Nearest to 相: 淑, 跋, 算, 烬, 执, 酉, 醿, 辞,\n",
      "Nearest to 仙: 派, 甬, 欷, 预, 浩, 妓, 寸, 攫,\n",
      "Nearest to 。: ，, 、, \n",
      ", ）, 辀, 蔡, 撤, 鹧,\n",
      "Nearest to 得: 徊, 取, 句, 焙, 赖, 含, 赢, 昔,\n",
      "Nearest to 好: 总, 但, 翥, 筛, 裯, 敌, 株, 藤,\n",
      "Nearest to 生: 诣, 篷, 为,  , 鏖, 纳, 娄, 复,\n",
      "Nearest to 此: 这, 今, 寓, 衲, 犯,  , 肉, 数,\n",
      "Nearest to 空: 孤, 微, 碾, 粱, 怅, 方, 翔, 笳,\n",
      "Nearest to 梦: 雏, 鹧, 都, 佛, 箫, 常, 歇, 髓,\n",
      "Nearest to 楼: 珍, 垆, 仁, 阁, 塍, 街, 哥, 鼓,\n",
      "Nearest to 酒: 窭, 忙, 辞, 脂, 谶, 箧, 据, 贮,\n",
      "Average loss at step  32000 :  4.166112600326538\n",
      "Average loss at step  34000 :  4.204160340428352\n",
      "Average loss at step  36000 :  4.23006632399559\n",
      "Average loss at step  38000 :  4.192692961215973\n",
      "Average loss at step  40000 :  4.180615952312946\n",
      "Nearest to 醉: 因, 尤, 樱, 纽, 氲, 骥, 聂, 宿,\n",
      "Nearest to 无: 何, 衷, 粼, 炽, 窈, 窕, 僽, 鄙,\n",
      "Nearest to 自: 谈, 汨, 眈, 定, 瞋, 某, 记, 舫,\n",
      "Nearest to （: 表, 鞠, ，, 貌, 、, 缩, 锐, 衤,\n",
      "Nearest to 天: 豚, 躔, 滋, 塍, , 轲, 俯, 珊,\n",
      "Nearest to 相: 淑, 算, 跋, 执, 酉, 辞, 烬, 难,\n",
      "Nearest to 仙: 派, 南, 甬, 攫, 预, 浩, 欷, 妓,\n",
      "Nearest to 。: ，, 、, 充, 辀, \n",
      ", 斫, 骎, ）,\n",
      "Nearest to 得: 取, 徊, 赖, 焙, 句, 昔, □, 君,\n",
      "Nearest to 好: 总, 但, 翥, 筛, 裯, 敌, 株, 悄,\n",
      "Nearest to 生: 篷, 诣, 为, 请, 娄, 杼, 纳, 复,\n",
      "Nearest to 此: 这, 寓, 今, 衲, 斯, 嵚, 数, 肉,\n",
      "Nearest to 空: 孤, 微, 方, 粱, 碾, 怅, 妥, 馀,\n",
      "Nearest to 梦: 雏, 鹧, 佛, 箫, 都, 歇, 飙, 若,\n",
      "Nearest to 楼: 阁, 垆, 珍, 仁, 胧, 糟, 台, 鼓,\n",
      "Nearest to 酒: 窭, 脂, 辞, 忙, 箧, 谶, 贮, 据,\n",
      "Average loss at step  42000 :  4.206320951223374\n",
      "Average loss at step  44000 :  4.202765667200088\n",
      "Average loss at step  46000 :  4.235835138678551\n",
      "Average loss at step  48000 :  4.280283239245414\n",
      "Average loss at step  50000 :  4.257928439378738\n",
      "Nearest to 醉: 尤, 氲, 因, 骥, 纽, 宿, 樱, 笑,\n",
      "Nearest to 无: 粼, 何, 衷, 炽, 窈, 鄙, 窕, 返,\n",
      "Nearest to 自: 谈, 某, 往, 眈, 应, 公, 粒, 汨,\n",
      "Nearest to （: ，, 表, 、, 鞠, 缩, 貌, 锐, 衤,\n",
      "Nearest to 天: 豚, 躔, 滋, 俯, , 珊, 樯, 塍,\n",
      "Nearest to 相: 淑, 执, 酉, 徒, 颅, 跋, 烬, 辞,\n",
      "Nearest to 仙: 派, 攫, 甬, 妓, 预, 南, 欷, 救,\n",
      "Nearest to 。: ，, 、, \n",
      ", 撤, 鹧, ）, 辀, 充,\n",
      "Nearest to 得: 取, 句, 徊, 光, 焙, 赢, 含, 裂,\n",
      "Nearest to 好: 总, 翥, 但, 筛, 裯, 敌, 枷, 恰,\n",
      "Nearest to 生: 篷, 诣, 鏖, 娄, 蹀, 杼, 纳, 为,\n",
      "Nearest to 此: 寓, 这, 今, 衲, 斯, 嵚, 数, 肉,\n",
      "Nearest to 空: 孤, 怅, 微, 碾, 粱, 耳, 方, 馀,\n",
      "Nearest to 梦: 鹧, 佛, 雏, 箫, 都, 蝣, 飙, 歇,\n",
      "Nearest to 楼: 阁, 梯, 台, 珍, 垆, 仁, 胧, 霸,\n",
      "Nearest to 酒: 窭, 脂, 忙, 辞, 贮, 箧, 谶, 据,\n",
      "Average loss at step  52000 :  4.2399769299030305\n",
      "Average loss at step  54000 :  4.194504273533821\n",
      "Average loss at step  56000 :  4.230890997409821\n",
      "Average loss at step  58000 :  4.250695599794388\n",
      "Average loss at step  60000 :  4.1738847706317905\n",
      "Nearest to 醉: 尤, 氲, 因, 骥, 纽, 笑, 樱, 宿,\n",
      "Nearest to 无: 何, 衷, 粼, 返, 不, 炽, 窈, 窕,\n",
      "Nearest to 自: 谈, 某, 应, 眈, 薛, 往, 捉, 塑,\n",
      "Nearest to （: ，, 、, 鞠, ·, 表, 貌, 缩, ）,\n",
      "Nearest to 天: 豚, 俯, 空, 塍, 滋, 敞, 躔, 嘱,\n",
      "Nearest to 相: 域, 酉, 执, 曾, 飘, 淑, 难, 磐,\n",
      "Nearest to 仙: 派, 南, 预, 妓, 甬, 攫, 欷, 葩,\n",
      "Nearest to 。: ，, 、, \n",
      ", 撤, 菩, ）, 辀, 斫,\n",
      "Nearest to 得: 取, 句, 徊, 君, 赢, 昔, 含, 裂,\n",
      "Nearest to 好: 总, 翥, 敌, 筛, 但, 黟, 诱, 裯,\n",
      "Nearest to 生: 篷, 诣, 杼, 鏖, 纳, 娄, 复, 旅,\n",
      "Nearest to 此: 今, 寓, 这, 衲, 嵚, 斯, 礼, 悠,\n",
      "Nearest to 空: 孤, 怅, 天, 暗, 耳, 粱, 思, 馀,\n",
      "Nearest to 梦: 佛, 鹧, 雏, 箫, 都, 蝣, 歇, 旖,\n",
      "Nearest to 楼: 台, 阁, 梯, 珍, 垆, 霸, 胧, 仁,\n",
      "Nearest to 酒: 窭, 脂, 忙, 辞, 贮, 箧, 谶, 幰,\n",
      "Average loss at step  62000 :  4.098139851212501\n",
      "Average loss at step  64000 :  4.1148922070264815\n",
      "Average loss at step  66000 :  4.130359442591667\n",
      "Average loss at step  68000 :  4.108073212265968\n",
      "Average loss at step  70000 :  4.1087473746836185\n",
      "Nearest to 醉: 尤, 氲, 笑, 骥, 因, 樱, 纽, 须,\n",
      "Nearest to 无: 衷, 何, 粼, 炽, 返, 窈, 不, 鄙,\n",
      "Nearest to 自: 谈, 某, 眈, 应, 还, 俚, 捉, 塑,\n",
      "Nearest to （: ，, 鞠, 、, 表, ·, 鸪, 缩, 锐,\n",
      "Nearest to 天: 豚, 空, 俯, 躔, 敞, 塍, 嘱, 珊,\n",
      "Nearest to 相: 颅, 酉, 徕, 域, 淑, 难, 执, 曾,\n",
      "Nearest to 仙: 攫, 南, 预, 甬, 妓, 派, 救, 归,\n",
      "Nearest to 。: ，, 、, 菩, \n",
      ", 让, 辀, 撤, ）,\n",
      "Nearest to 得: 取, 句, 君, 昔, 赖, 赢, 焙, 他,\n",
      "Nearest to 好: 总, 翥, 敌, 诱, 枷, 襄, 筛, 但,\n",
      "Nearest to 生: 篷, 杼, 蹀, 诣, 旅, 复, 纳, 为,\n",
      "Nearest to 此: 寓, 今, 这, 嵚, 衲, 一, 悠, 斯,\n",
      "Nearest to 空: 孤, 天, 耳, 怅, 暗, 馀, 方, 粱,\n",
      "Nearest to 梦: 佛, 雏, 箫, 蝣, 鹧, 歇, 飙, 凹,\n",
      "Nearest to 楼: 阁, 台, 梯, 糟, 胧, 霸, 垆, 珍,\n",
      "Nearest to 酒: 窭, 脂, 更, 箧, 辞, 贮, 忙, 谶,\n",
      "Average loss at step  72000 :  4.123440479040146\n",
      "Average loss at step  74000 :  4.122014698028565\n",
      "Average loss at step  76000 :  4.192399203658104\n",
      "Average loss at step  78000 :  4.197572778463364\n",
      "Average loss at step  80000 :  4.18986341893673\n",
      "Nearest to 醉: 笑, 氲, 尤, 骥, 宿, 因, 纽, 樱,\n",
      "Nearest to 无: 粼, 衷, 何, 鄙, 炽, 返, 窈, 漉,\n",
      "Nearest to 自: 谈, 某, 应, 往, 俚, 乞, 欣, 薛,\n",
      "Nearest to （: ，, 、, 鞠, 表, 鸪, ·, 泸, 锐,\n",
      "Nearest to 天: 豚, 俯, 空, 嘱, 躔, 滋, 珊, 樯,\n",
      "Nearest to 相: 酉, 域, 颅, 徒, 猿, 飘, 淑, 执,\n",
      "Nearest to 仙: 攫, 妓, 派, 救, 预, 甬, 欷, 葩,\n",
      "Nearest to 。: ，, 、, \n",
      ", 鹧, 菩, ）, 辀, 撤,\n",
      "Nearest to 得: 取, 句, 裂, 赢, 君, 他, , 涕,\n",
      "Nearest to 好: 翥, 总, 恰, 敌, 黟, 筛, 枷, 诱,\n",
      "Nearest to 生: 蹀, 篷, 杼, 诣, 旅, 鏖, 纳, 复,\n",
      "Nearest to 此: 寓, 今, 这, 衲, 嵚, 斯, 屡, 悠,\n",
      "Nearest to 空: 孤, 天, 耳, 暗, 怅, 盈, 但, 碾,\n",
      "Nearest to 梦: 佛, 鹧, 蝣, 恨, 箫, 侃, 较, 矣,\n",
      "Nearest to 楼: 梯, 台, 阁, 霸, 娇, 胧, 糟, 珍,\n",
      "Nearest to 酒: 窭, 脂, 更, 辞, 贮, 忙, 箧, 杯,\n",
      "Average loss at step  82000 :  4.169759987711906\n",
      "Average loss at step  84000 :  4.140705681800842\n",
      "Average loss at step  86000 :  4.187476668357849\n",
      "Average loss at step  88000 :  4.177230133891106\n",
      "Average loss at step  90000 :  4.100337274312973\n",
      "Nearest to 醉: 笑, 尤, 氲, 骥, 因, 纽, 宿, 劚,\n",
      "Nearest to 无: 何, 衷, 粼, 不, 返, 炽, 鄙, 漉,\n",
      "Nearest to 自: 谈, 应, 欣, 某, 还, 乞, 最, 俚,\n",
      "Nearest to （: ，, ·, 、, 鞠, \n",
      ", 鸪, 泸, 岱,\n",
      "Nearest to 天: 豚, 空, 俯, 嘱, 塍, 敞, 躔, 曙,\n",
      "Nearest to 相: 域, 酉, 飘, 曾, 邴, 颅, 执, 猿,\n",
      "Nearest to 仙: 攫, 妓, 南, 派, 预, 甬, 欷, 归,\n",
      "Nearest to 。: ，, 、, \n",
      ", 菩, ）, 辀, 鹧, 撤,\n",
      "Nearest to 得: 取, 句, 君, 年, 赢, 裂, 见, 他,\n",
      "Nearest to 好: 翥, 总, 敌, 黟, 恰, 襄, 诱, 触,\n",
      "Nearest to 生: 诣, 篷, 杼, 蹀, 旅, 鏖, 纳, 复,\n",
      "Nearest to 此: 寓, 今, 衲, 嵚, 这, 屡, 有, 悠,\n",
      "Nearest to 空: 孤, 天, 暗, 但, 盈, 耳, 怅, 沓,\n",
      "Nearest to 梦: 佛, 恨, 蝣, 事, 思, 鹧, 箫, 歇,\n",
      "Nearest to 楼: 台, 梯, 阁, 霸, 城, 珍, 糟, 渌,\n",
      "Nearest to 酒: 窭, 脂, 杯, 更, 啜, 箧, 辞, 贮,\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average loss at step  92000 :  4.0480333459377285\n",
      "Average loss at step  94000 :  4.0823291928768155\n",
      "Average loss at step  96000 :  4.057304111838341\n",
      "Average loss at step  98000 :  4.077854610919952\n",
      "Average loss at step  100000 :  4.056612766623497\n",
      "Nearest to 醉: 笑, 尤, 氲, 骥, 须, 瓷, 晁, 樱,\n",
      "Nearest to 无: 衷, 何, 粼, 不, 返, 鄙, 窈, 。,\n",
      "Nearest to 自: 谈, 还, 欣, 应, 乞, 俚, 某, 况,\n",
      "Nearest to （: ·, 鞠, 、, ，, 鸪, 泸, 表, \n",
      ",\n",
      "Nearest to 天: 空, 豚, 俯, 嘱, 敞, 躔, 塍, 奚,\n",
      "Nearest to 相: 域, 颅, 酉, 徕, 飘, 曾, 删, 邴,\n",
      "Nearest to 仙: 攫, 预, 归, 南, 妓, 甬, 救, 葩,\n",
      "Nearest to 。: ，, 、, \n",
      ", 菩, 勍, 谒, 撤, ）,\n",
      "Nearest to 得: 取, 君, 做, 句, 见, 窭, 赢, 他,\n",
      "Nearest to 好: 翥, 总, 敌, 乐, 襄, 诱, 恰, 黟,\n",
      "Nearest to 生: 蹀, 旅, 杼, 诣, 篷, 纳, 复, 暖,\n",
      "Nearest to 此: 寓, 今, 嵚, 这, 衲, 一, 悠, 屡,\n",
      "Nearest to 空: 孤, 天, 但, 暗, 盈, 耳, 怅, 馀,\n",
      "Nearest to 梦: 佛, 蝣, 恨, 凹, 事, 歇, 箫, 鹧,\n",
      "Nearest to 楼: 台, 梯, 阁, 城, 霸, 糟, 峰, 胧,\n",
      "Nearest to 酒: 窭, 更, 杯, 脂, 箧, 啜, 辞, 贮,\n"
     ]
    }
   ],
   "source": [
    "graph = tf.Graph()\n",
    "\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data.\n",
    "  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n",
    "  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n",
    "  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n",
    "\n",
    "  # Ops and variables pinned to the CPU because of missing GPU implementation\n",
    "  with tf.device('/cpu:0'):\n",
    "    # Look up embeddings for inputs.\n",
    "    embeddings = tf.Variable(\n",
    "        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) #这就是我们希望得到的嵌入向量\n",
    "    embed = tf.nn.embedding_lookup(embeddings, train_inputs) #查表得到词向量\n",
    "\n",
    "    # Construct the variables for the NCE loss\n",
    "    nce_weights = tf.Variable(\n",
    "        tf.truncated_normal([vocabulary_size, embedding_size],\n",
    "                            stddev=1.0 / math.sqrt(embedding_size)))\n",
    "    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "\n",
    "  # Compute the average NCE loss for the batch.\n",
    "  # tf.nce_loss automatically draws a new sample of the negative labels each\n",
    "  # time we evaluate the loss.\n",
    "  # Explanation of the meaning of NCE loss:\n",
    "  # http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/\n",
    "  loss = tf.reduce_mean(\n",
    "      tf.nn.nce_loss(weights=nce_weights,\n",
    "                     biases=nce_biases,\n",
    "                     labels=train_labels,\n",
    "                     inputs=embed,\n",
    "                     num_sampled=num_sampled,\n",
    "                     num_classes=vocabulary_size))\n",
    "\n",
    "  # Construct the SGD optimizer using a learning rate of 1.0.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)\n",
    "\n",
    "  # Compute the cosine similarity between minibatch examples and all embeddings.\n",
    "  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n",
    "  normalized_embeddings = embeddings / norm\n",
    "  valid_embeddings = tf.nn.embedding_lookup(\n",
    "      normalized_embeddings, valid_dataset)\n",
    "  similarity = tf.matmul(\n",
    "      valid_embeddings, normalized_embeddings, transpose_b=True)\n",
    "\n",
    "  # Add variable initializer.\n",
    "  init = tf.global_variables_initializer()\n",
    "\n",
    "# Step 5: Begin training.\n",
    "num_steps = 100001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  # We must initialize all variables before we use them.\n",
    "  init.run()\n",
    "  print('Initialized')\n",
    "\n",
    "  average_loss = 0\n",
    "  for step in xrange(num_steps):\n",
    "    batch_inputs, batch_labels = generate_batch(\n",
    "        batch_size, num_skips, skip_window)\n",
    "    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}\n",
    "\n",
    "    # We perform one update step by evaluating the optimizer op (including it\n",
    "    # in the list of returned values for session.run()\n",
    "    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n",
    "    average_loss += loss_val\n",
    "\n",
    "    if step % 2000 == 0:\n",
    "      if step > 0:\n",
    "        average_loss /= 2000\n",
    "      # The average loss is an estimate of the loss over the last 2000 batches.\n",
    "      print('Average loss at step ', step, ': ', average_loss)\n",
    "      average_loss = 0\n",
    "\n",
    "    # Note that this is expensive (~20% slowdown if computed every 500 steps)\n",
    "    if step % 10000 == 0:\n",
    "      sim = similarity.eval()\n",
    "      for i in xrange(valid_size):\n",
    "        valid_word = reverse_dictionary[valid_examples[i]]\n",
    "        top_k = 8  # number of nearest neighbors\n",
    "        nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n",
    "        log_str = 'Nearest to %s:' % valid_word\n",
    "        for k in xrange(top_k):\n",
    "          close_word = reverse_dictionary[nearest[k]]\n",
    "          log_str = '%s %s,' % (log_str, close_word)\n",
    "        print(log_str)\n",
    "  final_embeddings = normalized_embeddings.eval()\n",
    "\n",
    "# Step 6: Visualize the embeddings.\n",
    "# pylint: disable=missing-docstring\n",
    "# Function to draw visualization of distance between embeddings.\n",
    "def plot_with_labels(low_dim_embs, labels, filename):\n",
    "  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'\n",
    "  plt.figure(figsize=(18, 18))  # in inches\n",
    "  for i, label in enumerate(labels):\n",
    "    x, y = low_dim_embs[i, :]\n",
    "    plt.scatter(x, y)\n",
    "    plt.annotate(label,\n",
    "                 xy=(x, y),\n",
    "                 xytext=(5, 2),\n",
    "                 textcoords='offset points',\n",
    "                 ha='right',\n",
    "                 va='bottom')\n",
    "\n",
    "  plt.savefig(filename)\n",
    "\n",
    "try:\n",
    "  # pylint: disable=g-import-not-at-top\n",
    "  from sklearn.manifold import TSNE\n",
    "  import matplotlib.pyplot as plt\n",
    "\n",
    "  #add by zuosi\n",
    "  #plt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\n",
    "  #plt.rcParams['axes.unicode_minus']=False #用来正常显示负号\n",
    "  mpl.rcParams['font.sans-serif'] = ['SimHei'] #用来正常显示中文标签\n",
    "  mpl.rcParams['axes.unicode_minus'] = False #用来正常显示负号\n",
    "    \n",
    "  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')\n",
    "  plot_only = 500\n",
    "  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])\n",
    "  labels = [reverse_dictionary[i] for i in xrange(plot_only)]\n",
    "  plot_with_labels(low_dim_embs, labels, 'tsne.png')\n",
    "except ImportError as ex:\n",
    "  print('Please install sklearn, matplotlib, and scipy to show embeddings.')\n",
    "  print(ex)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Persist the learned (L2-normalized) embedding matrix for downstream use.\n",
     "np.save('embedding.npy', final_embeddings)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
