{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "\n",
    "class Model_vec:\n",
    "    \"\"\"Skip-gram word2vec graph trained with noise-contrastive estimation.\n",
    "\n",
    "    Builds placeholders for a batch of centre-word ids and their context-word\n",
    "    labels, an embedding matrix, and an Adam-optimised NCE loss.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, batch_size, dimension_size, learning_rate, vocabulary_size):\n",
    "        self.train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n",
    "        self.train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n",
    "        # Embedding rows indexed by word id, initialised uniformly in [-1, 1).\n",
    "        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, dimension_size], -1.0, 1.0))\n",
    "        embed = tf.nn.embedding_lookup(embeddings, self.train_inputs)\n",
    "        self.nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, dimension_size], stddev = 1.0 / np.sqrt(dimension_size)))\n",
    "        self.nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n",
    "        # num_sampled must be an int: use floor division -- batch_size / 2 is a\n",
    "        # float under Python 3 and is rejected by tf.nn.nce_loss.\n",
    "        self.loss = tf.reduce_mean(tf.nn.nce_loss(weights = self.nce_weights, biases = self.nce_biases, labels = self.train_labels,\n",
    "                                                  inputs=embed, num_sampled = batch_size // 2, num_classes = vocabulary_size))\n",
    "        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)\n",
    "        # L2-normalise rows so cosine similarity reduces to a dot product.\n",
    "        self.norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n",
    "        self.normalized_embeddings = embeddings / self.norm\n",
    "        \n",
    "class Model:\n",
    "    \"\"\"Four-layer tanh feed-forward classifier: softmax cross-entropy cost,\n",
    "    0.0005 * L2 weight penalty, Adam optimiser, and an accuracy metric.\"\"\"\n",
    "\n",
    "    def __init__(self, dimension_input, size_layer, dimension_output, learning_rate):\n",
    "        self.X = tf.placeholder(tf.float32, [None, dimension_input])\n",
    "        self.Y = tf.placeholder(tf.float32, [None, dimension_output])\n",
    "        # Weight/bias pairs: three hidden layers plus the output projection.\n",
    "        w1 = tf.Variable(tf.random_normal([dimension_input, size_layer], stddev=0.5))\n",
    "        b1 = tf.Variable(tf.random_normal([size_layer], stddev=0.1))\n",
    "        w2 = tf.Variable(tf.random_normal([size_layer, size_layer], stddev=0.5))\n",
    "        b2 = tf.Variable(tf.random_normal([size_layer], stddev=0.1))\n",
    "        w3 = tf.Variable(tf.random_normal([size_layer, size_layer], stddev=0.5))\n",
    "        b3 = tf.Variable(tf.random_normal([size_layer], stddev=0.1))\n",
    "        w4 = tf.Variable(tf.random_normal([size_layer, dimension_output], stddev=0.5))\n",
    "        b4 = tf.Variable(tf.random_normal([dimension_output], stddev=0.1))\n",
    "        # Stack the tanh hidden layers.\n",
    "        hidden = self.X\n",
    "        for w, b in ((w1, b1), (w2, b2), (w3, b3)):\n",
    "            hidden = tf.nn.tanh(tf.matmul(hidden, w) + b)\n",
    "        self.logits = tf.matmul(hidden, w4) + b4\n",
    "        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits, labels = self.Y))\n",
    "        # L2 regularisation over every trainable variable in the graph.\n",
    "        l2 = sum(0.0005 * tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())\n",
    "        self.cost += l2\n",
    "        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)\n",
    "        self.correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))\n",
    "        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "import re\n",
    "import collections\n",
    "import random\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def clearstring(string):\n",
    "    \"\"\"Strip every character except letters, digits, quotes and spaces,\n",
    "    then collapse the result into single-space-separated tokens.\"\"\"\n",
    "    cleaned = re.sub('[^\\'\\\"A-Za-z0-9 ]+', '', string)\n",
    "    tokens = [token.strip() for token in cleaned.split(' ') if token]\n",
    "    return ' '.join(tokens)\n",
    "\n",
    "def read_data():\n",
    "    \"\"\"Read every file under data/<label>/ and build a shuffled dataset.\n",
    "\n",
    "    Returns:\n",
    "        string: flat list of all whitespace-separated tokens, in shuffled row order.\n",
    "        dataset: np.ndarray of shape (n_samples, 2) -- column 0 the cleaned text,\n",
    "            column 1 the class index (np.array promotes the mixed [text, int]\n",
    "            pair to a string dtype, so labels come back as strings).\n",
    "        label: sorted folder names; a row's class index points into this list.\n",
    "    \"\"\"\n",
    "    list_folder = os.listdir('data/')\n",
    "    # NOTE(review): `label` aliases `list_folder`, so .sort() orders both --\n",
    "    # this keeps class indices consistent with the sorted folder names.\n",
    "    label = list_folder\n",
    "    label.sort()\n",
    "    outer_string, outer_label = [], []\n",
    "    for i in range(len(list_folder)):\n",
    "        list_file = os.listdir('data/' + list_folder[i])\n",
    "        strings = []\n",
    "        # Each file may hold many newline-separated samples.\n",
    "        for x in range(len(list_file)):\n",
    "            with open('data/' + list_folder[i] + '/' + list_file[x], 'r') as fopen:\n",
    "                strings += fopen.read().split('\\n')\n",
    "        strings = list(filter(None, strings))\n",
    "        for k in range(len(strings)):\n",
    "            strings[k] = clearstring(strings[k])\n",
    "        # Folder position i doubles as the class label for all its samples.\n",
    "        labels = [i] * len(strings)\n",
    "        outer_string += strings\n",
    "        outer_label += labels\n",
    "    \n",
    "    dataset = np.array([outer_string, outer_label])\n",
    "    dataset = dataset.T\n",
    "    np.random.shuffle(dataset)\n",
    "    \n",
    "    # Flatten all texts into one token stream for word2vec training.\n",
    "    string = []\n",
    "    for i in range(dataset.shape[0]):\n",
    "        string += dataset[i][0].split()\n",
    "    \n",
    "    return string, dataset, label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def build_dataset(words, vocabulary_size):\n",
    "    \"\"\"Map words to integer ids, most frequent word first.\n",
    "\n",
    "    Ids start at 1; id 0 is reserved for 'PAD' and also used for words outside\n",
    "    the `vocabulary_size` most common. (The original loop re-appended the\n",
    "    previous word's id for out-of-vocabulary words -- and raised NameError\n",
    "    when the very first word was out of vocabulary.)\n",
    "\n",
    "    Returns (data, dictionary, reverse_dictionary) where `data` is `words`\n",
    "    translated to ids.\n",
    "    \"\"\"\n",
    "    count = collections.Counter(words).most_common(vocabulary_size)\n",
    "    dictionary = dict()\n",
    "    for word, _ in count:\n",
    "        dictionary[word] = len(dictionary) + 1\n",
    "    data = []\n",
    "    for word in words:\n",
    "        # Fall back to 0 (PAD/unknown) for out-of-vocabulary words.\n",
    "        data.append(dictionary.get(word, 0))\n",
    "    dictionary['PAD'] = 0\n",
    "    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n",
    "    return data, dictionary, reverse_dictionary\n",
    "\n",
    "def generate_batch_skipgram(words, batch_size, num_skips, skip_window):\n",
    "    \"\"\"Generate one skip-gram batch of (centre word, context word) pairs.\n",
    "\n",
    "    The read position persists across calls (stored on the function object),\n",
    "    so successive calls walk through `words`. The original body reset a local\n",
    "    `data_index = 0` on every call, which made every training step return the\n",
    "    identical first batch.\n",
    "    \"\"\"\n",
    "    data_index = getattr(generate_batch_skipgram, '_data_index', 0) % len(words)\n",
    "    assert batch_size % num_skips == 0\n",
    "    assert num_skips <= 2 * skip_window\n",
    "    batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n",
    "    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n",
    "    # Window of words considered at once: centre + skip_window on each side.\n",
    "    span = 2 * skip_window + 1\n",
    "    buffer = collections.deque(maxlen=span)\n",
    "    for i in range(span):\n",
    "        buffer.append(words[data_index])\n",
    "        data_index = (data_index + 1) % len(words)\n",
    "    for i in range(batch_size // num_skips):\n",
    "        target = skip_window\n",
    "        targets_to_avoid = [skip_window]\n",
    "        for j in range(num_skips):\n",
    "            # Sample a context position in the window, never reusing one.\n",
    "            while target in targets_to_avoid:\n",
    "                target = random.randint(0, span - 1)\n",
    "            targets_to_avoid.append(target)\n",
    "            batch[i * num_skips + j] = buffer[skip_window]\n",
    "            labels[i * num_skips + j, 0] = buffer[target]\n",
    "        buffer.append(words[data_index])\n",
    "        data_index = (data_index + 1) % len(words)\n",
    "    # Step back so the last `span` words are revisited by the next batch.\n",
    "    generate_batch_skipgram._data_index = (data_index + len(words) - span) % len(words)\n",
    "    return batch, labels\n",
    "\n",
    "def generatevector(dimension, batch_size, skip_size, skip_window, num_skips, iteration, words_real):\n",
    "    \"\"\"Train skip-gram word2vec on `words_real` for `iteration` steps.\n",
    "\n",
    "    Returns (dictionary, reverse_dictionary, normalized embedding matrix).\n",
    "    NOTE(review): `skip_size` is accepted but never used in this body.\n",
    "    \"\"\"\n",
    "    \n",
    "    print (\"Data size:\", len(words_real))\n",
    "    # vocabulary_size is passed as the total token count, so most_common()\n",
    "    # keeps every distinct word.\n",
    "    data, dictionary, reverse_dictionary = build_dataset(words_real, len(words_real))\n",
    "    sess = tf.InteractiveSession()\n",
    "    print (\"Creating Word2Vec model..\")\n",
    "    model = Model_vec(batch_size, dimension, 0.1, len(dictionary))\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    last_time = time.time()\n",
    "    for step in range(iteration):\n",
    "        new_time = time.time()\n",
    "        batch_inputs, batch_labels = generate_batch_skipgram(data, batch_size, num_skips, skip_window)\n",
    "        feed_dict = {model.train_inputs: batch_inputs, model.train_labels: batch_labels}\n",
    "        _, loss = sess.run([model.optimizer, model.loss], feed_dict=feed_dict)\n",
    "        # Progress report every 1000 steps ('epoch' in the log really means 'step').\n",
    "        if ((step + 1) % 1000) == 0:\n",
    "            print (\"epoch:\", step + 1, \", loss:\", loss, \", speed:\", (time.time() - new_time) * 1000, \"s / 1000 epoch\")\n",
    "    # NOTE(review): the default graph is reset BEFORE .eval(); this relies on\n",
    "    # the InteractiveSession still referencing the old graph -- fragile, and\n",
    "    # the session is never closed. Confirm ordering if this is ever refactored.\n",
    "    tf.reset_default_graph()       \n",
    "    return dictionary, reverse_dictionary, model.normalized_embeddings.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "string, data, label = read_data()\n",
    "location = os.getcwd()\n",
    "# Hyper-parameters for word2vec training and the downstream classifier.\n",
    "# NOTE(review): num_layers, epoch, batch and maxlen are defined here but not\n",
    "# referenced by the visible cells; skip_size is passed on but unused downstream.\n",
    "dimension = 512\n",
    "skip_size = 8\n",
    "skip_window = 1\n",
    "num_skips = 2\n",
    "iteration_train_vectors = 20000\n",
    "num_layers = 3\n",
    "size_layer = 256\n",
    "learning_rate = 0.0001\n",
    "epoch = 100\n",
    "batch = 100\n",
    "maxlen = 50"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Data size: 8007324\n",
      "Creating Word2Vec model..\n",
      "epoch: 1000 , loss: 20.2202 , speed: 76.17592811584473 s / 1000 epoch\n",
      "epoch: 2000 , loss: 21.1476 , speed: 75.83856582641602 s / 1000 epoch\n",
      "epoch: 3000 , loss: 60.0386 , speed: 76.28560066223145 s / 1000 epoch\n",
      "epoch: 4000 , loss: 23.9346 , speed: 76.0960578918457 s / 1000 epoch\n",
      "epoch: 5000 , loss: 31.1849 , speed: 76.11989974975586 s / 1000 epoch\n",
      "epoch: 6000 , loss: 31.3076 , speed: 76.03096961975098 s / 1000 epoch\n",
      "epoch: 7000 , loss: 30.9381 , speed: 76.13253593444824 s / 1000 epoch\n",
      "epoch: 8000 , loss: 30.5808 , speed: 76.02190971374512 s / 1000 epoch\n",
      "epoch: 9000 , loss: 7.30779 , speed: 76.32803916931152 s / 1000 epoch\n",
      "epoch: 10000 , loss: 23.1935 , speed: 76.1103630065918 s / 1000 epoch\n",
      "epoch: 11000 , loss: 30.0706 , speed: 76.21216773986816 s / 1000 epoch\n",
      "epoch: 12000 , loss: 12.0578 , speed: 76.06911659240723 s / 1000 epoch\n",
      "epoch: 13000 , loss: 12.3838 , speed: 76.26795768737793 s / 1000 epoch\n",
      "epoch: 14000 , loss: 15.965 , speed: 76.4620304107666 s / 1000 epoch\n",
      "epoch: 15000 , loss: 12.5769 , speed: 76.2171745300293 s / 1000 epoch\n",
      "epoch: 16000 , loss: 38.774 , speed: 76.2166976928711 s / 1000 epoch\n",
      "epoch: 17000 , loss: 37.7989 , speed: 76.15184783935547 s / 1000 epoch\n",
      "epoch: 18000 , loss: 1.23239 , speed: 76.17473602294922 s / 1000 epoch\n",
      "epoch: 19000 , loss: 5.32502 , speed: 76.23124122619629 s / 1000 epoch\n",
      "epoch: 20000 , loss: 3.84343 , speed: 76.12085342407227 s / 1000 epoch\n"
     ]
    }
   ],
   "source": [
    "# Train the word vectors. NOTE(review): batch_size is hard-coded to 32 here\n",
    "# rather than using the `batch` constant (100) defined above -- confirm intent.\n",
    "dictionary, reverse_dictionary, vectors = generatevector(dimension, 32, skip_size, skip_window, num_skips, iteration_train_vectors, string)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;\n",
    "# train_test_split now lives in sklearn.model_selection.\n",
    "from sklearn.model_selection import train_test_split\n",
    "# Column 0 holds the cleaned text, column 1 the (stringified) class label.\n",
    "train_X, test_X, train_Y, test_Y = train_test_split(data[:, 0], data[:, 1], test_size = 0.25)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0 , pass acc: 0 , current acc: 0.277380028622\n",
      "epoch: 1 , training loss: 9.78156181886 , training acc: 0.25695456574 , valid loss: 8.53324538 , valid acc: 0.277380028622\n",
      "epoch: 1 , pass acc: 0.277380028622 , current acc: 0.327456803245\n",
      "epoch: 2 , training loss: 7.90836092546 , training acc: 0.301628268588 , valid loss: 7.43985049601 , valid acc: 0.327456803245\n",
      "epoch: 2 , pass acc: 0.327456803245 , current acc: 0.349126667957\n",
      "epoch: 3 , training loss: 7.07367070775 , training acc: 0.339158658131 , valid loss: 6.66108467071 , valid acc: 0.349126667957\n",
      "epoch: 3 , pass acc: 0.349126667957 , current acc: 0.375844517433\n",
      "epoch: 4 , training loss: 6.07985553403 , training acc: 0.360930890193 , valid loss: 5.44558788124 , valid acc: 0.375844517433\n",
      "epoch: 4 , pass acc: 0.375844517433 , current acc: 0.405834920461\n",
      "epoch: 5 , training loss: 4.79457229189 , training acc: 0.393272540434 , valid loss: 4.2004150333 , valid acc: 0.405834920461\n",
      "epoch: 5 , pass acc: 0.405834920461 , current acc: 0.435345476025\n",
      "epoch: 6 , training loss: 3.72316778637 , training acc: 0.426074842726 , valid loss: 3.33146564242 , valid acc: 0.435345476025\n",
      "epoch: 6 , pass acc: 0.435345476025 , current acc: 0.475854110237\n",
      "epoch: 7 , training loss: 3.03106678493 , training acc: 0.462584758064 , valid loss: 2.7981545138 , valid acc: 0.475854110237\n",
      "epoch: 7 , pass acc: 0.475854110237 , current acc: 0.519069079646\n",
      "epoch: 8 , training loss: 2.59719088157 , training acc: 0.507856667623 , valid loss: 2.45473867781 , valid acc: 0.519069079646\n",
      "epoch: 8 , pass acc: 0.519069079646 , current acc: 0.550422244989\n",
      "epoch: 9 , training loss: 2.30784446093 , training acc: 0.546679441603 , valid loss: 2.21844901313 , valid acc: 0.550422244989\n",
      "epoch: 9 , pass acc: 0.550422244989 , current acc: 0.574779248484\n",
      "epoch: 10 , training loss: 2.10113184096 , training acc: 0.575908488405 , valid loss: 2.04568232605 , valid acc: 0.574779248484\n",
      "epoch: 10 , pass acc: 0.574779248484 , current acc: 0.591756218197\n",
      "epoch: 11 , training loss: 1.94572295688 , training acc: 0.596538687345 , valid loss: 1.91282545662 , valid acc: 0.591756218197\n",
      "epoch: 11 , pass acc: 0.591756218197 , current acc: 0.603733184642\n",
      "epoch: 12 , training loss: 1.82364023868 , training acc: 0.61265513033 , valid loss: 1.80633390362 , valid acc: 0.603733184642\n",
      "epoch: 12 , pass acc: 0.603733184642 , current acc: 0.615575796326\n",
      "epoch: 13 , training loss: 1.72369987482 , training acc: 0.625905290132 , valid loss: 1.71731446904 , valid acc: 0.615575796326\n",
      "epoch: 13 , pass acc: 0.615575796326 , current acc: 0.626055641904\n",
      "epoch: 14 , training loss: 1.63867005029 , training acc: 0.638064598435 , valid loss: 1.64031605872 , valid acc: 0.626055641904\n",
      "epoch: 14 , pass acc: 0.626055641904 , current acc: 0.635902091975\n",
      "epoch: 15 , training loss: 1.563989641 , training acc: 0.649392173981 , valid loss: 1.57228066608 , valid acc: 0.635902091975\n",
      "epoch: 15 , pass acc: 0.635902091975 , current acc: 0.645316678308\n",
      "epoch: 16 , training loss: 1.49685180496 , training acc: 0.661110023238 , valid loss: 1.51124032651 , valid acc: 0.645316678308\n",
      "epoch: 16 , pass acc: 0.645316678308 , current acc: 0.655806121407\n",
      "epoch: 17 , training loss: 1.43556870208 , training acc: 0.672332032476 , valid loss: 1.45584829972 , valid acc: 0.655806121407\n",
      "epoch: 17 , pass acc: 0.655806121407 , current acc: 0.666151611167\n",
      "epoch: 18 , training loss: 1.37920011703 , training acc: 0.684027489492 , valid loss: 1.40526418459 , valid acc: 0.666151611167\n",
      "epoch: 18 , pass acc: 0.666151611167 , current acc: 0.676026850996\n",
      "epoch: 19 , training loss: 1.32727793486 , training acc: 0.69588289662 , valid loss: 1.35900758084 , valid acc: 0.676026850996\n",
      "epoch: 19 , pass acc: 0.676026850996 , current acc: 0.685086351439\n",
      "epoch: 20 , training loss: 1.27961195286 , training acc: 0.706689039466 , valid loss: 1.31676393736 , valid acc: 0.685086351439\n",
      "epoch: 20 , pass acc: 0.685086351439 , current acc: 0.694145852224\n",
      "epoch: 21 , training loss: 1.23609020671 , training acc: 0.716663447886 , valid loss: 1.27829192467 , valid acc: 0.694145852224\n",
      "epoch: 21 , pass acc: 0.694145852224 , current acc: 0.702245660455\n",
      "epoch: 22 , training loss: 1.19652550911 , training acc: 0.725582194401 , valid loss: 1.2433556622 , valid acc: 0.702245660455\n",
      "epoch: 22 , pass acc: 0.702245660455 , current acc: 0.709529730557\n",
      "epoch: 23 , training loss: 1.16062192822 , training acc: 0.734193839557 , valid loss: 1.21169743171 , valid acc: 0.709529730557\n",
      "epoch: 23 , pass acc: 0.709529730557 , current acc: 0.716046045872\n",
      "epoch: 24 , training loss: 1.12802018014 , training acc: 0.741970551277 , valid loss: 1.18304485363 , valid acc: 0.716046045872\n",
      "epoch: 24 , pass acc: 0.716046045872 , current acc: 0.721957753579\n",
      "epoch: 25 , training loss: 1.09835098046 , training acc: 0.748637217401 , valid loss: 1.15711581787 , valid acc: 0.721957753579\n",
      "epoch: 25 , pass acc: 0.721957753579 , current acc: 0.727303245189\n",
      "epoch: 26 , training loss: 1.07127217709 , training acc: 0.754945598996 , valid loss: 1.13362655593 , valid acc: 0.727303245189\n",
      "epoch: 26 , pass acc: 0.727303245189 , current acc: 0.732005739052\n",
      "epoch: 27 , training loss: 1.04648322702 , training acc: 0.760633378496 , valid loss: 1.11230195169 , valid acc: 0.732005739052\n",
      "epoch: 27 , pass acc: 0.732005739052 , current acc: 0.736343552478\n",
      "epoch: 28 , training loss: 1.02372356862 , training acc: 0.765694159121 , valid loss: 1.09288477062 , valid acc: 0.736343552478\n",
      "epoch: 28 , pass acc: 0.736343552478 , current acc: 0.740441440335\n",
      "epoch: 29 , training loss: 1.00276627976 , training acc: 0.770473431145 , valid loss: 1.07514260186 , valid acc: 0.740441440335\n",
      "epoch: 29 , pass acc: 0.740441440335 , current acc: 0.744078678442\n",
      "epoch: 30 , training loss: 0.983413829856 , training acc: 0.775003181614 , valid loss: 1.0588734905 , valid acc: 0.744078678442\n",
      "epoch: 30 , pass acc: 0.744078678442 , current acc: 0.747197678416\n",
      "epoch: 31 , training loss: 0.965495141935 , training acc: 0.779296207451 , valid loss: 1.04390603138 , valid acc: 0.747197678416\n",
      "epoch: 31 , pass acc: 0.747197678416 , current acc: 0.75021111662\n",
      "epoch: 32 , training loss: 0.948862076912 , training acc: 0.783112585182 , valid loss: 1.0300952588 , valid acc: 0.75021111662\n",
      "epoch: 32 , pass acc: 0.75021111662 , current acc: 0.752821480244\n",
      "epoch: 33 , training loss: 0.933386005371 , training acc: 0.786493903826 , valid loss: 1.01731812216 , valid acc: 0.752821480244\n",
      "epoch: 33 , pass acc: 0.752821480244 , current acc: 0.755441442675\n",
      "epoch: 34 , training loss: 0.918954723456 , training acc: 0.789491344036 , valid loss: 1.00546925894 , valid acc: 0.755441442675\n",
      "epoch: 34 , pass acc: 0.755441442675 , current acc: 0.757888656775\n",
      "epoch: 35 , training loss: 0.905470150866 , training acc: 0.792594351241 , valid loss: 0.994457891543 , valid acc: 0.757888656775\n",
      "epoch: 35 , pass acc: 0.757888656775 , current acc: 0.759721671551\n",
      "epoch: 36 , training loss: 0.892846157973 , training acc: 0.795361465853 , valid loss: 0.984204768963 , valid acc: 0.759721671551\n",
      "epoch: 36 , pass acc: 0.759721671551 , current acc: 0.761487505658\n",
      "epoch: 37 , training loss: 0.881006798642 , training acc: 0.797930244292 , valid loss: 0.974640271085 , valid acc: 0.761487505658\n",
      "epoch: 37 , pass acc: 0.761487505658 , current acc: 0.763560443316\n",
      "epoch: 38 , training loss: 0.869884717373 , training acc: 0.800531011473 , valid loss: 0.965702840097 , valid acc: 0.763560443316\n",
      "epoch: 38 , pass acc: 0.763560443316 , current acc: 0.765681364715\n",
      "epoch: 39 , training loss: 0.859419808488 , training acc: 0.802866264749 , valid loss: 0.95733787168 , valid acc: 0.765681364715\n",
      "epoch: 39 , pass acc: 0.765681364715 , current acc: 0.767312841944\n",
      "epoch: 40 , training loss: 0.849558063543 , training acc: 0.805172727952 , valid loss: 0.94949720288 , valid acc: 0.767312841944\n",
      "epoch: 40 , pass acc: 0.767312841944 , current acc: 0.768522056531\n",
      "epoch: 41 , training loss: 0.84025060425 , training acc: 0.807351230297 , valid loss: 0.942138697959 , valid acc: 0.768522056531\n",
      "epoch: 41 , pass acc: 0.768522056531 , current acc: 0.769673686796\n",
      "epoch: 42 , training loss: 0.83145308996 , training acc: 0.809337794971 , valid loss: 0.935225589033 , valid acc: 0.769673686796\n",
      "epoch: 42 , pass acc: 0.769673686796 , current acc: 0.771333956615\n",
      "epoch: 43 , training loss: 0.823125128172 , training acc: 0.811321160176 , valid loss: 0.928725955754 , valid acc: 0.771333956615\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 43 , pass acc: 0.771333956615 , current acc: 0.772744705661\n",
      "epoch: 44 , training loss: 0.815229911798 , training acc: 0.81295903621 , valid loss: 0.922611611647 , valid acc: 0.772744705661\n",
      "epoch: 44 , pass acc: 0.772744705661 , current acc: 0.773618026395\n",
      "epoch: 45 , training loss: 0.807733850779 , training acc: 0.814478550366 , valid loss: 0.9168575469 , valid acc: 0.773618026395\n",
      "epoch: 45 , pass acc: 0.773618026395 , current acc: 0.774520138289\n",
      "epoch: 46 , training loss: 0.800606312007 , training acc: 0.81601725758 , valid loss: 0.911440972403 , valid acc: 0.774520138289\n",
      "epoch: 46 , pass acc: 0.774520138289 , current acc: 0.775652573971\n",
      "epoch: 47 , training loss: 0.793819363062 , training acc: 0.817735109833 , valid loss: 0.906340670792 , valid acc: 0.775652573971\n",
      "epoch: 47 , pass acc: 0.775652573971 , current acc: 0.776353150663\n",
      "epoch: 48 , training loss: 0.787347490827 , training acc: 0.819309005067 , valid loss: 0.901536854181 , valid acc: 0.776353150663\n",
      "epoch: 48 , pass acc: 0.776353150663 , current acc: 0.777475990798\n",
      "epoch: 49 , training loss: 0.781167472297 , training acc: 0.820892499127 , valid loss: 0.897010518756 , valid acc: 0.777475990798\n",
      "epoch: 49 , pass acc: 0.777475990798 , current acc: 0.778042209297\n",
      "epoch: 50 , training loss: 0.775258056414 , training acc: 0.822309645757 , valid loss: 0.892743637431 , valid acc: 0.778042209297\n",
      "epoch: 50 , pass acc: 0.778042209297 , current acc: 0.77880996317\n",
      "epoch: 51 , training loss: 0.769599874357 , training acc: 0.823787574061 , valid loss: 0.888718960953 , valid acc: 0.77880996317\n",
      "epoch: 51 , pass acc: 0.77880996317 , current acc: 0.779203436768\n",
      "epoch: 52 , training loss: 0.76417524696 , training acc: 0.824952000353 , valid loss: 0.884919910422 , valid acc: 0.779203436768\n",
      "epoch: 52 , pass acc: 0.779203436768 , current acc: 0.779990384023\n",
      "epoch: 53 , training loss: 0.758968014749 , training acc: 0.826180408475 , valid loss: 0.881331092737 , valid acc: 0.779990384023\n",
      "epoch: 53 , pass acc: 0.779990384023 , current acc: 0.780777333737\n",
      "epoch: 54 , training loss: 0.753963443479 , training acc: 0.827383223342 , valid loss: 0.877937353103 , valid acc: 0.780777333737\n",
      "epoch: 54 , pass acc: 0.780777333737 , current acc: 0.78112282245\n",
      "epoch: 55 , training loss: 0.749148098193 , training acc: 0.828368507893 , valid loss: 0.874724521118 , valid acc: 0.78112282245\n",
      "epoch: 55 , pass acc: 0.78112282245 , current acc: 0.781593071179\n",
      "epoch: 56 , training loss: 0.744509700259 , training acc: 0.829449763034 , valid loss: 0.871679135225 , valid acc: 0.781593071179\n",
      "epoch: 56 , pass acc: 0.781593071179 , current acc: 0.782312843026\n",
      "epoch: 57 , training loss: 0.740037050353 , training acc: 0.830566205699 , valid loss: 0.868788201669 , valid acc: 0.782312843026\n",
      "epoch: 57 , pass acc: 0.782312843026 , current acc: 0.782792688446\n",
      "epoch: 58 , training loss: 0.735719899756 , training acc: 0.831593076674 , valid loss: 0.866039547376 , valid acc: 0.782792688446\n",
      "epoch: 58 , pass acc: 0.782792688446 , current acc: 0.783253339968\n",
      "epoch: 59 , training loss: 0.731548918758 , training acc: 0.832591158396 , valid loss: 0.863421484857 , valid acc: 0.783253339968\n",
      "epoch: 59 , pass acc: 0.783253339968 , current acc: 0.783713992349\n",
      "epoch: 60 , training loss: 0.727515577431 , training acc: 0.833579641253 , valid loss: 0.860923159031 , valid acc: 0.783713992349\n",
      "epoch: 60 , pass acc: 0.783713992349 , current acc: 0.784145855171\n",
      "epoch: 61 , training loss: 0.723612042436 , training acc: 0.83440497878 , valid loss: 0.858533983782 , valid acc: 0.784145855171\n",
      "epoch: 61 , pass acc: 0.784145855171 , current acc: 0.784683282453\n",
      "epoch: 62 , training loss: 0.719831173838 , training acc: 0.83527509993 , valid loss: 0.856244664103 , valid acc: 0.784683282453\n",
      "epoch: 62 , pass acc: 0.784683282453 , current acc: 0.785518215908\n",
      "epoch: 63 , training loss: 0.716166401245 , training acc: 0.836170813554 , valid loss: 0.854046256609 , valid acc: 0.785518215908\n",
      "epoch: 63 , pass acc: 0.785518215908 , current acc: 0.786103627332\n",
      "epoch: 64 , training loss: 0.712611722912 , training acc: 0.83702813965 , valid loss: 0.851930742701 , valid acc: 0.786103627332\n",
      "epoch: 64 , pass acc: 0.786103627332 , current acc: 0.78665065302\n",
      "epoch: 65 , training loss: 0.709161558368 , training acc: 0.83796224265 , valid loss: 0.84989074599 , valid acc: 0.78665065302\n",
      "epoch: 65 , pass acc: 0.78665065302 , current acc: 0.78738001847\n",
      "epoch: 66 , training loss: 0.705810793054 , training acc: 0.838579643825 , valid loss: 0.847920022061 , valid acc: 0.78738001847\n",
      "epoch: 66 , pass acc: 0.78738001847 , current acc: 0.787917449298\n",
      "epoch: 67 , training loss: 0.702554691376 , training acc: 0.839472159105 , valid loss: 0.846012499019 , valid acc: 0.787917449298\n",
      "epoch: 67 , pass acc: 0.787917449298 , current acc: 0.78835890721\n",
      "epoch: 68 , training loss: 0.699388842684 , training acc: 0.840332684251 , valid loss: 0.844163461118 , valid acc: 0.78835890721\n",
      "epoch: 68 , pass acc: 0.78835890721 , current acc: 0.788829157082\n",
      "epoch: 69 , training loss: 0.696309142897 , training acc: 0.840914896415 , valid loss: 0.842368061449 , valid acc: 0.788829157082\n",
      "epoch: 69 , pass acc: 0.788829157082 , current acc: 0.789289810092\n",
      "epoch: 70 , training loss: 0.693311796784 , training acc: 0.84173703472 , valid loss: 0.840622831729 , valid acc: 0.789289810092\n",
      "epoch: 70 , pass acc: 0.789289810092 , current acc: 0.789712075022\n",
      "epoch: 71 , training loss: 0.690393212918 , training acc: 0.842338442535 , valid loss: 0.83892457968 , valid acc: 0.789712075022\n",
      "epoch: 71 , pass acc: 0.789712075022 , current acc: 0.790345472303\n",
      "epoch: 72 , training loss: 0.687550067921 , training acc: 0.843147783232 , valid loss: 0.837270422712 , valid acc: 0.790345472303\n",
      "epoch: 72 , pass acc: 0.790345472303 , current acc: 0.790556604311\n",
      "epoch: 73 , training loss: 0.684779214767 , training acc: 0.843816371278 , valid loss: 0.835658034985 , valid acc: 0.790556604311\n",
      "epoch: 73 , pass acc: 0.790556604311 , current acc: 0.79094047956\n",
      "epoch: 74 , training loss: 0.682077753481 , training acc: 0.84445936443 , valid loss: 0.834085492163 , valid acc: 0.79094047956\n",
      "epoch: 74 , pass acc: 0.79094047956 , current acc: 0.791305164401\n",
      "epoch: 75 , training loss: 0.67944290498 , training acc: 0.845108757493 , valid loss: 0.832551411555 , valid acc: 0.791305164401\n",
      "epoch: 75 , pass acc: 0.791305164401 , current acc: 0.791439520134\n",
      "epoch: 76 , training loss: 0.676872073803 , training acc: 0.845770945114 , valid loss: 0.831054384729 , valid acc: 0.791439520134\n",
      "epoch: 76 , pass acc: 0.791439520134 , current acc: 0.791554683001\n",
      "epoch: 77 , training loss: 0.674362803442 , training acc: 0.846381949848 , valid loss: 0.829593637168 , valid acc: 0.791554683001\n",
      "epoch: 77 , pass acc: 0.791554683001 , current acc: 0.792024933502\n",
      "epoch: 78 , training loss: 0.671912787104 , training acc: 0.846980159186 , valid loss: 0.828168327343 , valid acc: 0.792024933502\n",
      "epoch: 78 , pass acc: 0.792024933502 , current acc: 0.792370423474\n",
      "epoch: 79 , training loss: 0.669519829039 , training acc: 0.847568771146 , valid loss: 0.826777860379 , valid acc: 0.792370423474\n",
      "epoch: 79 , pass acc: 0.792370423474 , current acc: 0.792975029252\n",
      "epoch: 80 , training loss: 0.667181852559 , training acc: 0.848218163045 , valid loss: 0.825421550109 , valid acc: 0.792975029252\n",
      "epoch: 80 , pass acc: 0.792975029252 , current acc: 0.793330114771\n",
      "epoch: 81 , training loss: 0.664896886879 , training acc: 0.848845162723 , valid loss: 0.824099273062 , valid acc: 0.793330114771\n",
      "epoch: 81 , pass acc: 0.793330114771 , current acc: 0.793675605544\n",
      "epoch: 82 , training loss: 0.662663074798 , training acc: 0.84933140769 , valid loss: 0.822810756954 , valid acc: 0.793675605544\n",
      "epoch: 82 , pass acc: 0.793675605544 , current acc: 0.794059482509\n",
      "epoch: 83 , training loss: 0.660478632409 , training acc: 0.849891228796 , valid loss: 0.821555863015 , valid acc: 0.794059482509\n",
      "epoch: 83 , pass acc: 0.794059482509 , current acc: 0.794136256838\n",
      "epoch: 84 , training loss: 0.65834187833 , training acc: 0.850665380786 , valid loss: 0.820334330745 , valid acc: 0.794136256838\n",
      "epoch: 84 , pass acc: 0.794136256838 , current acc: 0.794587313614\n",
      "epoch: 85 , training loss: 0.656251197136 , training acc: 0.851225201987 , valid loss: 0.819146278209 , valid acc: 0.794587313614\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 85 , pass acc: 0.794587313614 , current acc: 0.794788849445\n",
      "epoch: 86 , training loss: 0.654205057343 , training acc: 0.851769028588 , valid loss: 0.817991905701 , valid acc: 0.794788849445\n",
      "epoch: 86 , pass acc: 0.794788849445 , current acc: 0.795249501311\n",
      "epoch: 87 , training loss: 0.65220199063 , training acc: 0.852290462357 , valid loss: 0.816871017847 , valid acc: 0.795249501311\n",
      "epoch: 87 , pass acc: 0.795249501311 , current acc: 0.795566198522\n",
      "epoch: 88 , training loss: 0.650240615029 , training acc: 0.852872675474 , valid loss: 0.815784011572 , valid acc: 0.795566198522\n",
      "epoch: 88 , pass acc: 0.795566198522 , current acc: 0.7959980602\n",
      "epoch: 89 , training loss: 0.648319590372 , training acc: 0.853362120102 , valid loss: 0.814730613566 , valid acc: 0.7959980602\n",
      "epoch: 89 , pass acc: 0.7959980602 , current acc: 0.796506697923\n",
      "epoch: 90 , training loss: 0.646437645588 , training acc: 0.853909144398 , valid loss: 0.81371073891 , valid acc: 0.796506697923\n",
      "epoch: 90 , pass acc: 0.796506697923 , current acc: 0.796804202724\n",
      "epoch: 91 , training loss: 0.644593538427 , training acc: 0.854424179172 , valid loss: 0.812724148644 , valid acc: 0.796804202724\n",
      "epoch: 91 , pass acc: 0.796804202724 , current acc: 0.797092110834\n",
      "epoch: 92 , training loss: 0.642786078417 , training acc: 0.855057576854 , valid loss: 0.811770215087 , valid acc: 0.797092110834\n",
      "epoch: 92 , pass acc: 0.797092110834 , current acc: 0.797428004973\n",
      "epoch: 93 , training loss: 0.641014147056 , training acc: 0.85551823076 , valid loss: 0.810848285221 , valid acc: 0.797428004973\n",
      "epoch: 93 , pass acc: 0.797428004973 , current acc: 0.797744702698\n",
      "epoch: 94 , training loss: 0.639276631515 , training acc: 0.855882914247 , valid loss: 0.809957701391 , valid acc: 0.797744702698\n",
      "epoch: 94 , pass acc: 0.797744702698 , current acc: 0.798032610236\n",
      "epoch: 95 , training loss: 0.637572450224 , training acc: 0.856401148585 , valid loss: 0.809097671577 , valid acc: 0.798032610236\n",
      "epoch: 95 , pass acc: 0.798032610236 , current acc: 0.798368503059\n",
      "epoch: 96 , training loss: 0.635900584451 , training acc: 0.856964168626 , valid loss: 0.808267732728 , valid acc: 0.798368503059\n",
      "epoch: 96 , pass acc: 0.798368503059 , current acc: 0.798733186527\n",
      "epoch: 97 , training loss: 0.634260013271 , training acc: 0.857405627205 , valid loss: 0.807467282962 , valid acc: 0.798733186527\n",
      "epoch: 97 , pass acc: 0.798733186527 , current acc: 0.799088273591\n",
      "epoch: 98 , training loss: 0.632649745551 , training acc: 0.857738321341 , valid loss: 0.806695446973 , valid acc: 0.799088273591\n",
      "epoch: 98 , pass acc: 0.799088273591 , current acc: 0.799356985801\n",
      "epoch: 99 , training loss: 0.631068834455 , training acc: 0.85822456545 , valid loss: 0.805951416464 , valid acc: 0.799356985801\n",
      "epoch: 99 , pass acc: 0.799356985801 , current acc: 0.799625700186\n",
      "epoch: 100 , training loss: 0.629516345774 , training acc: 0.858640433654 , valid loss: 0.805234836125 , valid acc: 0.799625700186\n",
      "epoch: 100 , pass acc: 0.799625700186 , current acc: 0.799884816335\n",
      "epoch: 101 , training loss: 0.62799137538 , training acc: 0.85902750923 , valid loss: 0.804545142028 , valid acc: 0.799884816335\n",
      "epoch: 101 , pass acc: 0.799884816335 , current acc: 0.800182321879\n",
      "epoch: 102 , training loss: 0.626493072155 , training acc: 0.859369799371 , valid loss: 0.803881341602 , valid acc: 0.800182321879\n",
      "epoch: 102 , pass acc: 0.800182321879 , current acc: 0.800431843625\n",
      "epoch: 103 , training loss: 0.625020572984 , training acc: 0.859804860213 , valid loss: 0.803242883618 , valid acc: 0.800431843625\n",
      "epoch: 103 , pass acc: 0.800431843625 , current acc: 0.800575797251\n",
      "epoch: 104 , training loss: 0.623573041342 , training acc: 0.860147150049 , valid loss: 0.802629418886 , valid acc: 0.800575797251\n",
      "epoch: 104 , pass acc: 0.800575797251 , current acc: 0.800902092983\n",
      "epoch: 105 , training loss: 0.622149706926 , training acc: 0.86070377321 , valid loss: 0.802040049302 , valid acc: 0.800902092983\n",
      "epoch: 106 , training loss: 0.620749807079 , training acc: 0.861122839548 , valid loss: 0.801474086726 , valid acc: 0.800892495892\n",
      "epoch: 106 , pass acc: 0.800902092983 , current acc: 0.801199597727\n",
      "epoch: 107 , training loss: 0.619372599051 , training acc: 0.861426741838 , valid loss: 0.800931245661 , valid acc: 0.801199597727\n",
      "epoch: 107 , pass acc: 0.801199597727 , current acc: 0.801362744849\n",
      "epoch: 108 , training loss: 0.61801737729 , training acc: 0.861823414963 , valid loss: 0.800410447646 , valid acc: 0.801362744849\n",
      "epoch: 108 , pass acc: 0.801362744849 , current acc: 0.801602666987\n",
      "epoch: 109 , training loss: 0.616683454647 , training acc: 0.862152909954 , valid loss: 0.799911327989 , valid acc: 0.801602666987\n",
      "epoch: 109 , pass acc: 0.801602666987 , current acc: 0.801689041496\n",
      "epoch: 110 , training loss: 0.61537019079 , training acc: 0.862482404316 , valid loss: 0.799433159908 , valid acc: 0.801689041496\n",
      "epoch: 110 , pass acc: 0.801689041496 , current acc: 0.80185218919\n",
      "epoch: 111 , training loss: 0.614076942263 , training acc: 0.862802302178 , valid loss: 0.798975368064 , valid acc: 0.80185218919\n",
      "epoch: 111 , pass acc: 0.80185218919 , current acc: 0.802188081156\n",
      "epoch: 112 , training loss: 0.612803127061 , training acc: 0.863119001848 , valid loss: 0.798537157719 , valid acc: 0.802188081156\n",
      "epoch: 112 , pass acc: 0.802188081156 , current acc: 0.802351227706\n",
      "epoch: 113 , training loss: 0.611548154984 , training acc: 0.863422903947 , valid loss: 0.798118021318 , valid acc: 0.802351227706\n",
      "epoch: 113 , pass acc: 0.802351227706 , current acc: 0.802523973693\n",
      "epoch: 114 , training loss: 0.610311490694 , training acc: 0.863771591633 , valid loss: 0.797717317882 , valid acc: 0.802523973693\n",
      "epoch: 114 , pass acc: 0.802523973693 , current acc: 0.802802284997\n",
      "epoch: 115 , training loss: 0.60909258713 , training acc: 0.864065899635 , valid loss: 0.797334256963 , valid acc: 0.802802284997\n",
      "epoch: 115 , pass acc: 0.802802284997 , current acc: 0.802994223108\n",
      "epoch: 116 , training loss: 0.607890967702 , training acc: 0.864404990497 , valid loss: 0.796968499552 , valid acc: 0.802994223108\n",
      "epoch: 116 , pass acc: 0.802994223108 , current acc: 0.803224549355\n",
      "epoch: 117 , training loss: 0.606706150157 , training acc: 0.864811260179 , valid loss: 0.79661915467 , valid acc: 0.803224549355\n",
      "epoch: 117 , pass acc: 0.803224549355 , current acc: 0.803358906576\n",
      "epoch: 118 , training loss: 0.605537696167 , training acc: 0.865172744808 , valid loss: 0.796285634993 , valid acc: 0.803358906576\n",
      "epoch: 118 , pass acc: 0.803358906576 , current acc: 0.803483667278\n",
      "epoch: 119 , training loss: 0.604385153832 , training acc: 0.865415867273 , valid loss: 0.795967287786 , valid acc: 0.803483667278\n",
      "epoch: 119 , pass acc: 0.803483667278 , current acc: 0.803579637105\n",
      "epoch: 120 , training loss: 0.60324813416 , training acc: 0.865745362073 , valid loss: 0.795663381235 , valid acc: 0.803579637105\n",
      "epoch: 120 , pass acc: 0.803579637105 , current acc: 0.80385794881\n",
      "epoch: 121 , training loss: 0.602126272277 , training acc: 0.866122841149 , valid loss: 0.795373811088 , valid acc: 0.80385794881\n",
      "epoch: 121 , pass acc: 0.80385794881 , current acc: 0.804011498612\n",
      "epoch: 122 , training loss: 0.601019198858 , training acc: 0.866471529007 , valid loss: 0.795097604022 , valid acc: 0.804011498612\n",
      "epoch: 123 , training loss: 0.599926561222 , training acc: 0.866772233544 , valid loss: 0.794833973565 , valid acc: 0.803992305916\n",
      "epoch: 123 , pass acc: 0.804011498612 , current acc: 0.804414569702\n",
      "epoch: 124 , training loss: 0.598848049553 , training acc: 0.86701535639 , valid loss: 0.794582758378 , valid acc: 0.804414569702\n",
      "epoch: 124 , pass acc: 0.804414569702 , current acc: 0.804491344889\n",
      "epoch: 125 , training loss: 0.597783391484 , training acc: 0.867229687599 , valid loss: 0.794343145246 , valid acc: 0.804491344889\n",
      "epoch: 125 , pass acc: 0.804491344889 , current acc: 0.804760060075\n",
      "epoch: 126 , training loss: 0.596732264193 , training acc: 0.867581574812 , valid loss: 0.794114777228 , valid acc: 0.804760060075\n",
      "epoch: 127 , training loss: 0.595694421034 , training acc: 0.867840691591 , valid loss: 0.793896856326 , valid acc: 0.804616103303\n",
      "epoch: 127 , pass acc: 0.804760060075 , current acc: 0.804788850033\n",
      "epoch: 128 , training loss: 0.594669626336 , training acc: 0.868128600024 , valid loss: 0.793689309979 , valid acc: 0.804788850033\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 128 , pass acc: 0.804788850033 , current acc: 0.804913609934\n",
      "epoch: 129 , training loss: 0.593657608758 , training acc: 0.868490083833 , valid loss: 0.793491426448 , valid acc: 0.804913609934\n",
      "epoch: 130 , training loss: 0.592658165339 , training acc: 0.868717211622 , valid loss: 0.793302770654 , valid acc: 0.804769656194\n",
      "epoch: 130 , pass acc: 0.804913609934 , current acc: 0.805047966812\n",
      "epoch: 131 , training loss: 0.591671077872 , training acc: 0.868982727129 , valid loss: 0.793123232884 , valid acc: 0.805047966812\n",
      "epoch: 131 , pass acc: 0.805047966812 , current acc: 0.805115145193\n",
      "epoch: 132 , training loss: 0.590696145423 , training acc: 0.869280231243 , valid loss: 0.792952059813 , valid acc: 0.805115145193\n",
      "epoch: 132 , pass acc: 0.805115145193 , current acc: 0.805182324433\n",
      "epoch: 133 , training loss: 0.589733168206 , training acc: 0.869510558997 , valid loss: 0.792789072571 , valid acc: 0.805182324433\n",
      "epoch: 133 , pass acc: 0.805182324433 , current acc: 0.805230308174\n",
      "epoch: 134 , training loss: 0.588781960676 , training acc: 0.869804864271 , valid loss: 0.792634158268 , valid acc: 0.805230308174\n",
      "epoch: 134 , pass acc: 0.805230308174 , current acc: 0.805345472013\n",
      "epoch: 135 , training loss: 0.587842328334 , training acc: 0.869945619191 , valid loss: 0.792486911228 , valid acc: 0.805345472013\n",
      "epoch: 136 , training loss: 0.586914100168 , training acc: 0.870236725282 , valid loss: 0.792346978371 , valid acc: 0.805316679251\n",
      "epoch: 137 , training loss: 0.585997107371 , training acc: 0.870502242143 , valid loss: 0.792214195153 , valid acc: 0.80533587412\n",
      "epoch: 137 , pass acc: 0.805345472013 , current acc: 0.80546063425\n",
      "epoch: 138 , training loss: 0.585091152837 , training acc: 0.870754960345 , valid loss: 0.792088758236 , valid acc: 0.80546063425\n",
      "epoch: 138 , pass acc: 0.80546063425 , current acc: 0.805633379207\n",
      "epoch: 139 , training loss: 0.584196106333 , training acc: 0.870991684597 , valid loss: 0.791970103693 , valid acc: 0.805633379207\n",
      "epoch: 139 , pass acc: 0.805633379207 , current acc: 0.805710153765\n",
      "epoch: 140 , training loss: 0.583311784176 , training acc: 0.871206016073 , valid loss: 0.791858173492 , valid acc: 0.805710153765\n",
      "epoch: 141 , training loss: 0.582438023233 , training acc: 0.871433143004 , valid loss: 0.791752860779 , valid acc: 0.805662169681\n",
      "epoch: 141 , pass acc: 0.805710153765 , current acc: 0.805738945669\n",
      "epoch: 142 , training loss: 0.581574674903 , training acc: 0.87174024543 , valid loss: 0.791654130633 , valid acc: 0.805738945669\n",
      "epoch: 142 , pass acc: 0.805738945669 , current acc: 0.805902093878\n",
      "epoch: 143 , training loss: 0.580721577104 , training acc: 0.871948179589 , valid loss: 0.791561989661 , valid acc: 0.805902093878\n",
      "epoch: 143 , pass acc: 0.805902093878 , current acc: 0.806007658909\n",
      "epoch: 144 , training loss: 0.579878569955 , training acc: 0.872172107547 , valid loss: 0.791476063302 , valid acc: 0.806007658909\n",
      "epoch: 144 , pass acc: 0.806007658909 , current acc: 0.806199597649\n",
      "epoch: 145 , training loss: 0.579045492672 , training acc: 0.872428025352 , valid loss: 0.791396460126 , valid acc: 0.806199597649\n",
      "epoch: 145 , pass acc: 0.806199597649 , current acc: 0.806276373751\n",
      "epoch: 146 , training loss: 0.57822219361 , training acc: 0.87268074487 , valid loss: 0.7913228445 , valid acc: 0.806276373751\n",
      "epoch: 147 , training loss: 0.57740853253 , training acc: 0.872955856839 , valid loss: 0.791255394093 , valid acc: 0.806257179683\n",
      "epoch: 148 , training loss: 0.576604348722 , training acc: 0.873211775998 , valid loss: 0.791193776934 , valid acc: 0.80613241818\n",
      "epoch: 149 , training loss: 0.575809490105 , training acc: 0.873451697563 , valid loss: 0.791138157191 , valid acc: 0.806257177909\n",
      "epoch: 149 , pass acc: 0.806276373751 , current acc: 0.806439520473\n",
      "epoch: 150 , training loss: 0.575023800754 , training acc: 0.873627642123 , valid loss: 0.791088159963 , valid acc: 0.806439520473\n",
      "epoch: 150 , pass acc: 0.806439520473 , current acc: 0.806583473298\n",
      "epoch: 151 , training loss: 0.574247131678 , training acc: 0.873739607217 , valid loss: 0.79104380567 , valid acc: 0.806583473298\n",
      "epoch: 151 , pass acc: 0.806583473298 , current acc: 0.80665065248\n",
      "epoch: 152 , training loss: 0.573479323855 , training acc: 0.873902754549 , valid loss: 0.791005046034 , valid acc: 0.80665065248\n",
      "epoch: 152 , pass acc: 0.80665065248 , current acc: 0.806775412438\n",
      "epoch: 153 , training loss: 0.57272024528 , training acc: 0.874171468572 , valid loss: 0.790971801772 , valid acc: 0.806775412438\n",
      "epoch: 153 , pass acc: 0.806775412438 , current acc: 0.806909771318\n",
      "epoch: 154 , training loss: 0.571969728066 , training acc: 0.874462575636 , valid loss: 0.790943899762 , valid acc: 0.806909771318\n",
      "epoch: 154 , pass acc: 0.806909771318 , current acc: 0.806957755402\n",
      "epoch: 155 , training loss: 0.571227611713 , training acc: 0.874696100914 , valid loss: 0.790921418314 , valid acc: 0.806957755402\n",
      "epoch: 156 , training loss: 0.570493782217 , training acc: 0.874916830794 , valid loss: 0.79090423004 , valid acc: 0.806919366979\n",
      "epoch: 157 , training loss: 0.56976802383 , training acc: 0.875195140859 , valid loss: 0.790892796359 , valid acc: 0.806938561906\n",
      "epoch: 157 , pass acc: 0.806957755402 , current acc: 0.807082514502\n",
      "epoch: 158 , training loss: 0.569050212663 , training acc: 0.875403074465 , valid loss: 0.790888016261 , valid acc: 0.807082514502\n",
      "epoch: 158 , pass acc: 0.807082514502 , current acc: 0.807130500302\n",
      "epoch: 159 , training loss: 0.568340309598 , training acc: 0.875556626155 , valid loss: 0.79089071952 , valid acc: 0.807130500302\n",
      "epoch: 159 , pass acc: 0.807130500302 , current acc: 0.807284050905\n",
      "epoch: 160 , training loss: 0.567638407141 , training acc: 0.875710177387 , valid loss: 0.790898814025 , valid acc: 0.807284050905\n",
      "epoch: 160 , pass acc: 0.807284050905 , current acc: 0.807341632939\n",
      "epoch: 161 , training loss: 0.566944168677 , training acc: 0.875918111241 , valid loss: 0.790911632027 , valid acc: 0.807341632939\n",
      "epoch: 161 , pass acc: 0.807341632939 , current acc: 0.807351227628\n",
      "epoch: 162 , training loss: 0.566257446699 , training acc: 0.875982090173 , valid loss: 0.790929304232 , valid acc: 0.807351227628\n",
      "epoch: 163 , training loss: 0.565578123782 , training acc: 0.876170829997 , valid loss: 0.790951959631 , valid acc: 0.807303244116\n",
      "epoch: 164 , training loss: 0.564906066645 , training acc: 0.876369166321 , valid loss: 0.790979257043 , valid acc: 0.807284050047\n",
      "epoch: 165 , training loss: 0.564241203145 , training acc: 0.876685864694 , valid loss: 0.791011149561 , valid acc: 0.807178483186\n",
      "epoch: 166 , training loss: 0.563583421492 , training acc: 0.876938584289 , valid loss: 0.791047834789 , valid acc: 0.807092110392\n",
      "epoch: 167 , training loss: 0.562932628445 , training acc: 0.877197701391 , valid loss: 0.791089059381 , valid acc: 0.807111305605\n",
      "epoch: 168 , training loss: 0.562288711964 , training acc: 0.877319263138 , valid loss: 0.791134632509 , valid acc: 0.807264856322\n",
      "epoch: 169 , training loss: 0.561651589298 , training acc: 0.877485609158 , valid loss: 0.791184183417 , valid acc: 0.807168887295\n",
      "epoch: 170 , training loss: 0.561021173488 , training acc: 0.877715935519 , valid loss: 0.791237662798 , valid acc: 0.807274453985\n",
      "epoch: 170 , pass acc: 0.807351227628 , current acc: 0.807408812064\n",
      "epoch: 171 , training loss: 0.560397309013 , training acc: 0.877930267358 , valid loss: 0.791295045969 , valid acc: 0.807408812064\n",
      "epoch: 171 , pass acc: 0.807408812064 , current acc: 0.807456793918\n",
      "epoch: 172 , training loss: 0.559779980571 , training acc: 0.878144599388 , valid loss: 0.791355986684 , valid acc: 0.807456793918\n",
      "epoch: 172 , pass acc: 0.807456793918 , current acc: 0.807629541278\n",
      "epoch: 173 , training loss: 0.559169019129 , training acc: 0.878272558776 , valid loss: 0.791420042143 , valid acc: 0.807629541278\n",
      "epoch: 174 , training loss: 0.55856433772 , training acc: 0.878445302666 , valid loss: 0.791487733485 , valid acc: 0.807495183771\n",
      "epoch: 175 , training loss: 0.557965860591 , training acc: 0.878592455246 , valid loss: 0.791558339992 , valid acc: 0.807552765404\n",
      "epoch: 176 , training loss: 0.557373501241 , training acc: 0.878819582463 , valid loss: 0.791631799627 , valid acc: 0.807629540134\n",
      "epoch: 176 , pass acc: 0.807629541278 , current acc: 0.80771591144\n",
      "epoch: 177 , training loss: 0.556787167204 , training acc: 0.879033915122 , valid loss: 0.791708430036 , valid acc: 0.80771591144\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 177 , pass acc: 0.80771591144 , current acc: 0.807744703572\n",
      "epoch: 178 , training loss: 0.556206755335 , training acc: 0.879245047701 , valid loss: 0.791787581172 , valid acc: 0.807744703572\n",
      "epoch: 179 , training loss: 0.555632170755 , training acc: 0.879436985354 , valid loss: 0.791869146267 , valid acc: 0.807658331637\n",
      "epoch: 180 , training loss: 0.555063358005 , training acc: 0.879712098658 , valid loss: 0.791953244273 , valid acc: 0.807648734202\n",
      "epoch: 180 , pass acc: 0.807744703572 , current acc: 0.807792687256\n",
      "epoch: 181 , training loss: 0.554500225533 , training acc: 0.87998721034 , valid loss: 0.792039563297 , valid acc: 0.807792687256\n",
      "epoch: 182 , training loss: 0.553942674515 , training acc: 0.880115168795 , valid loss: 0.792128325853 , valid acc: 0.807744703057\n",
      "epoch: 182 , pass acc: 0.807792687256 , current acc: 0.807898253603\n",
      "epoch: 183 , training loss: 0.553390664221 , training acc: 0.880287914381 , valid loss: 0.792218853582 , valid acc: 0.807898253603\n",
      "epoch: 184 , training loss: 0.552844091096 , training acc: 0.880537434469 , valid loss: 0.792311101122 , valid acc: 0.807792687542\n",
      "epoch: 185 , training loss: 0.552302851911 , training acc: 0.880745368227 , valid loss: 0.79240500804 , valid acc: 0.80784067237\n",
      "epoch: 185 , pass acc: 0.807898253603 , current acc: 0.808023014819\n",
      "epoch: 186 , training loss: 0.55176689828 , training acc: 0.880892520369 , valid loss: 0.792500410365 , valid acc: 0.808023014819\n",
      "epoch: 186 , pass acc: 0.808023014819 , current acc: 0.808090193944\n",
      "epoch: 187 , training loss: 0.551236123571 , training acc: 0.881090858104 , valid loss: 0.792597027006 , valid acc: 0.808090193944\n",
      "epoch: 188 , training loss: 0.550710490926 , training acc: 0.8812987907 , valid loss: 0.792696427189 , valid acc: 0.808051805407\n",
      "epoch: 188 , pass acc: 0.808090193944 , current acc: 0.808099791265\n",
      "epoch: 189 , training loss: 0.550190253959 , training acc: 0.881519520123 , valid loss: 0.792799893988 , valid acc: 0.808099791265\n",
      "epoch: 189 , pass acc: 0.808099791265 , current acc: 0.808147775006\n",
      "epoch: 190 , training loss: 0.549675366157 , training acc: 0.881666672931 , valid loss: 0.792904701434 , valid acc: 0.808147775006\n",
      "epoch: 190 , pass acc: 0.808147775006 , current acc: 0.808205356582\n",
      "epoch: 191 , training loss: 0.549165465619 , training acc: 0.881810627759 , valid loss: 0.793011318985 , valid acc: 0.808205356582\n",
      "epoch: 191 , pass acc: 0.808205356582 , current acc: 0.808310921785\n",
      "epoch: 192 , training loss: 0.548660526463 , training acc: 0.88201536201 , valid loss: 0.793119186632 , valid acc: 0.808310921785\n",
      "epoch: 192 , pass acc: 0.808310921785 , current acc: 0.808426086024\n",
      "epoch: 193 , training loss: 0.548160485756 , training acc: 0.88208893933 , valid loss: 0.793228577601 , valid acc: 0.808426086024\n",
      "epoch: 194 , training loss: 0.547665268917 , training acc: 0.882325663295 , valid loss: 0.793339307958 , valid acc: 0.808387697487\n",
      "epoch: 194 , pass acc: 0.808426086024 , current acc: 0.808454877298\n",
      "epoch: 195 , training loss: 0.547174868432 , training acc: 0.882450423119 , valid loss: 0.793451234527 , valid acc: 0.808454877298\n",
      "epoch: 196 , training loss: 0.54668913794 , training acc: 0.882597575795 , valid loss: 0.793564702636 , valid acc: 0.808406892928\n",
      "epoch: 197 , training loss: 0.546208159966 , training acc: 0.882680750244 , valid loss: 0.793679305207 , valid acc: 0.808320520478\n",
      "epoch: 198 , training loss: 0.545731833728 , training acc: 0.882875886413 , valid loss: 0.793794920057 , valid acc: 0.808330116482\n",
      "epoch: 199 , training loss: 0.545260108984 , training acc: 0.883032636847 , valid loss: 0.793911438957 , valid acc: 0.808378103084\n",
      "epoch: 200 , training loss: 0.544792985047 , training acc: 0.883211779655 , valid loss: 0.79402887887 , valid acc: 0.808445280607\n",
      "epoch: 200 , pass acc: 0.808454877298 , current acc: 0.808598831611\n",
      "epoch: 201 , training loss: 0.544330411522 , training acc: 0.883352533908 , valid loss: 0.794146976235 , valid acc: 0.808598831611\n",
      "epoch: 201 , pass acc: 0.808598831611 , current acc: 0.808646815237\n",
      "epoch: 202 , training loss: 0.54387235856 , training acc: 0.883512482857 , valid loss: 0.794265859044 , valid acc: 0.808646815237\n",
      "epoch: 202 , pass acc: 0.808646815237 , current acc: 0.808704398873\n",
      "epoch: 203 , training loss: 0.543418801115 , training acc: 0.883675631696 , valid loss: 0.794385553722 , valid acc: 0.808704398873\n",
      "epoch: 204 , training loss: 0.542969707154 , training acc: 0.88381318667 , valid loss: 0.794505717058 , valid acc: 0.80866601085\n",
      "epoch: 205 , training loss: 0.542525007873 , training acc: 0.883969936627 , valid loss: 0.794626556351 , valid acc: 0.808598831439\n",
      "epoch: 206 , training loss: 0.542084691497 , training acc: 0.88411708997 , valid loss: 0.794747461799 , valid acc: 0.808627621112\n",
      "epoch: 207 , training loss: 0.541648675931 , training acc: 0.88428663664 , valid loss: 0.794869008083 , valid acc: 0.808541249863\n",
      "epoch: 208 , training loss: 0.541216940632 , training acc: 0.884417793896 , valid loss: 0.794990863143 , valid acc: 0.808454876555\n",
      "epoch: 209 , training loss: 0.540789418385 , training acc: 0.884529758686 , valid loss: 0.795113324947 , valid acc: 0.808406893386\n",
      "epoch: 210 , training loss: 0.54036602437 , training acc: 0.884721696987 , valid loss: 0.795236066727 , valid acc: 0.808320519448\n",
      "epoch: 211 , training loss: 0.539946718374 , training acc: 0.884971216235 , valid loss: 0.795359149232 , valid acc: 0.808262937643\n",
      "epoch: 212 , training loss: 0.539531479718 , training acc: 0.885159956307 , valid loss: 0.795483089998 , valid acc: 0.808253341639\n",
      "epoch: 213 , training loss: 0.539120184569 , training acc: 0.885275120699 , valid loss: 0.795607156379 , valid acc: 0.808301326753\n",
      "epoch: 214 , training loss: 0.538712777164 , training acc: 0.88534549752 , valid loss: 0.795731677003 , valid acc: 0.80820535601\n",
      "epoch: 215 , training loss: 0.538309216108 , training acc: 0.885527838748 , valid loss: 0.795856866835 , valid acc: 0.808166969932\n",
      "epoch: 216 , training loss: 0.537909464068 , training acc: 0.885665394428 , valid loss: 0.795982447673 , valid acc: 0.808234148257\n",
      "epoch: 217 , training loss: 0.537513429634 , training acc: 0.885866931231 , valid loss: 0.79610837261 , valid acc: 0.808253341124\n",
      "epoch: 218 , training loss: 0.537121047586 , training acc: 0.885978895506 , valid loss: 0.796235000046 , valid acc: 0.80818616177\n",
      "epoch: 219 , training loss: 0.536732286997 , training acc: 0.886122849132 , valid loss: 0.796361869448 , valid acc: 0.808099790693\n",
      "epoch: 220 , training loss: 0.536347100064 , training acc: 0.88623161306 , valid loss: 0.796489044223 , valid acc: 0.808128580994\n",
      "epoch: 221 , training loss: 0.535965369 , training acc: 0.886359573154 , valid loss: 0.796616834379 , valid acc: 0.808147773232\n",
      "epoch: 222 , training loss: 0.535587115036 , training acc: 0.886493929708 , valid loss: 0.796744917725 , valid acc: 0.808205356811\n",
      "epoch: 223 , training loss: 0.535212274276 , training acc: 0.886564308321 , valid loss: 0.796873355467 , valid acc: 0.808291729718\n",
      "break epoch: 223\n"
     ]
    }
   ],
   "source": [
    "tf.reset_default_graph()\n",
    "sess = tf.InteractiveSession()\n",
    "model = Model(dimension, 128, len(label), learning_rate)\n",
    "sess.run(tf.global_variables_initializer())\n",
    "saver = tf.train.Saver(tf.global_variables())\n",
    "EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 20, 0, 0, 0\n",
    "batch_size = 200\n",
    "while True:\n",
    "    if CURRENT_CHECKPOINT == EARLY_STOPPING:\n",
    "        print('break epoch:', EPOCH)\n",
    "        break\n",
    "    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n",
    "    for i in range(0, (train_X.shape[0] // batch) * batch, batch):\n",
    "        batch_x = np.zeros((batch, dimension))\n",
    "        batch_y = np.zeros((batch, len(label)))\n",
    "        for k in range(batch):\n",
    "            tokens = train_X[i + k].split()\n",
    "            for no, text in enumerate(tokens):\n",
    "                try:\n",
    "                    batch_x[k, :] += vectors[dictionary[text], :]\n",
    "                except:\n",
    "                    continue\n",
    "            batch_y[k, int(train_Y[i + k])] = 1.0\n",
    "        loss, _ = sess.run([model.cost, model.optimizer], feed_dict = {model.X : batch_x, model.Y : batch_y})\n",
    "        train_loss += loss\n",
    "        train_acc += sess.run(model.accuracy, feed_dict = {model.X : batch_x, model.Y : batch_y})\n",
    "    \n",
    "    for i in range(0, (test_X.shape[0] // batch) * batch, batch):\n",
    "        batch_x = np.zeros((batch, dimension))\n",
    "        batch_y = np.zeros((batch, len(label)))\n",
    "        for k in range(batch):\n",
    "            tokens = test_X[i + k].split()\n",
    "            for no, text in enumerate(tokens):\n",
    "                try:\n",
    "                    batch_x[k, :] += vectors[dictionary[text], :]\n",
    "                except:\n",
    "                    continue\n",
    "            batch_y[k, int(test_Y[i + k])] = 1.0\n",
    "        loss, acc = sess.run([model.cost, model.accuracy], feed_dict = {model.X : batch_x, model.Y : batch_y})\n",
    "        test_loss += loss\n",
    "        test_acc += acc\n",
    "        \n",
    "    train_loss /= (train_X.shape[0] // batch)\n",
    "    train_acc /= (train_X.shape[0] // batch)\n",
    "    test_loss /= (test_X.shape[0] // batch)\n",
    "    test_acc /= (test_X.shape[0] // batch)\n",
    "    if test_acc > CURRENT_ACC:\n",
    "        print('epoch:', EPOCH, ', pass acc:', CURRENT_ACC, ', current acc:', test_acc)\n",
    "        CURRENT_ACC = test_acc\n",
    "        CURRENT_CHECKPOINT = 0\n",
    "        saver.save(sess, os.getcwd() + \"/model-rnn-vector.ckpt\")\n",
    "    else:\n",
    "        CURRENT_CHECKPOINT += 1\n",
    "    EPOCH += 1\n",
    "    print('epoch:', EPOCH, ', training loss:', train_loss, ', training acc:', train_acc, ', valid loss:', test_loss, ', valid acc:', test_acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
