{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from glob import glob\n",
    "from datetime import datetime"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "DATA_PATH = \"./data/\"\n",
    "TRAIN_PATH = DATA_PATH + \"train.csv\"\n",
    "TEST_PATH = DATA_PATH + \"test.csv\"\n",
    "WORD_EMBED_PATH = DATA_PATH + \"word_embed.txt\"\n",
    "CHAR_EMBED_PATH = DATA_PATH + \"char_embed.txt\"\n",
    "QUEST_PATH = DATA_PATH + \"question.csv\"\n",
    "\n",
    "train_data = pd.read_csv(TRAIN_PATH)\n",
    "test_data = pd.read_csv(TEST_PATH)\n",
    "question_data = pd.read_csv(QUEST_PATH)\n",
    "word_embedding_data = pd.read_csv(WORD_EMBED_PATH, delimiter=\" \", header=None, index_col=0)\n",
    "char_embedding_data = pd.read_csv(CHAR_EMBED_PATH, delimiter=\" \", header=None, index_col=0)\n",
    "\n",
    "question_data[\"words\"] = question_data[\"words\"].str.split(\" \")\n",
    "question_data[\"chars\"] = question_data[\"chars\"].str.split(\" \")\n",
    "\n",
    "label = train_data[\"label\"].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "((10001, 300), (3049, 300))"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from keras.preprocessing.text import Tokenizer\n",
    "\n",
    "MAX_COUNT = 10000\n",
    "\n",
    "word_tokenizer = Tokenizer(MAX_COUNT)\n",
    "word_tokenizer.fit_on_texts(question_data[\"words\"])\n",
    "\n",
    "word_embedding_data = np.concatenate(\n",
    "    (\n",
    "        np.zeros(shape=(1, word_embedding_data.shape[1]), dtype=np.float64),\n",
    "        word_embedding_data.loc[list(word_tokenizer.word_index.keys())[:MAX_COUNT]].values\n",
    "    ),\n",
    "    axis=0\n",
    ")\n",
    "\n",
    "char_tokenizer = Tokenizer(MAX_COUNT)\n",
    "char_tokenizer.fit_on_texts(question_data[\"chars\"])\n",
    "\n",
    "char_embedding_data = np.concatenate(\n",
    "    (\n",
    "        np.zeros(shape=(1, char_embedding_data.shape[1]), dtype=np.float64),\n",
    "        char_embedding_data.loc[list(char_tokenizer.word_index.keys())[:MAX_COUNT]].values\n",
    "    ),\n",
    "    axis=0\n",
    ")\n",
    "\n",
    "word_embedding_data.shape, char_embedding_data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((254386, 30),\n",
       " (254386, 30),\n",
       " (172956, 30),\n",
       " (172956, 30),\n",
       " (254386, 30),\n",
       " (254386, 30),\n",
       " (172956, 30),\n",
       " (172956, 30))"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from keras.preprocessing.sequence import pad_sequences\n",
    "\n",
    "SEQ_LEN = 30\n",
    "\n",
    "def gen_word_data(data):\n",
    "    seq_word1 = word_tokenizer.texts_to_sequences(data.merge(question_data, how=\"left\", left_on=\"q1\", right_on=\"qid\")[\"words\"])\n",
    "    seq_word2 = word_tokenizer.texts_to_sequences(data.merge(question_data, how=\"left\", left_on=\"q2\", right_on=\"qid\")[\"words\"])\n",
    "    return pad_sequences(seq_word1, maxlen=SEQ_LEN, padding=\"pre\",truncating=\"pre\"), \\\n",
    "        pad_sequences(seq_word2, maxlen=SEQ_LEN, padding=\"pre\",truncating=\"pre\")\n",
    "    \n",
    "def gen_char_data(data):\n",
    "    seq_char1 = char_tokenizer.texts_to_sequences(data.merge(question_data, how=\"left\", left_on=\"q1\", right_on=\"qid\")[\"chars\"])\n",
    "    seq_char2 = char_tokenizer.texts_to_sequences(data.merge(question_data, how=\"left\", left_on=\"q2\", right_on=\"qid\")[\"chars\"])\n",
    "    return pad_sequences(seq_char1, maxlen=SEQ_LEN, padding=\"pre\",truncating=\"pre\"), \\\n",
    "        pad_sequences(seq_char2, maxlen=SEQ_LEN, padding=\"pre\",truncating=\"pre\")\n",
    "\n",
    "word1, word2 = gen_word_data(train_data)\n",
    "char1, char2 = gen_char_data(train_data)\n",
    "test_word1, test_word2 = gen_word_data(test_data)\n",
    "test_char1, test_char2 = gen_char_data(test_data)\n",
    "\n",
    "word1.shape, word2.shape, test_word1.shape, test_word2.shape, char1.shape, char2.shape, test_char1.shape, test_char2.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from keras.models import Model\n",
    "from keras.layers.merge import concatenate\n",
    "from keras.callbacks import EarlyStopping, ModelCheckpoint\n",
    "from keras.layers import LSTM, Bidirectional, TimeDistributed\n",
    "from keras.layers import Conv1D, MaxPool1D, GlobalAveragePooling1D\n",
    "from keras.layers import Input, Embedding, Dropout, BatchNormalization, Dense, Flatten, Lambda, K"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# general\n",
    "NUM_EPOCHES = 30\n",
    "BATCH_SIZE = 1024\n",
    "DENSE_SIZE = 300 # 512\n",
    "DROP_RATE = 0.3\n",
    "\n",
    "# cnn\n",
    "CONV_LEN_1 = 128\n",
    "CONV_LEN_2 = 128\n",
    "CONV_LEN_3 = 128\n",
    "CONV_LEN_4 = 128\n",
    "CONV_LEN_5 = 128\n",
    "CONV_LEN_6 = 128\n",
    "CONV_LEN = CONV_LEN_1 + CONV_LEN_2 + CONV_LEN_3 + CONV_LEN_4 + CONV_LEN_5 + CONV_LEN_6\n",
    "\n",
    "# lstm\n",
    "LSTM_SIZE_1 = 256\n",
    "LSTM_SIZE_2 = 256\n",
    "DROP_RATE_LSTM = 0.3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def cnn_layer_1(input1, input2, kernel_size, filters):\n",
    "    conv = Conv1D(filters=filters, kernel_size=kernel_size, padding=\"same\", activation=\"relu\")\n",
    "    \n",
    "    conv_a = conv(input1)\n",
    "    conv_a = GlobalAveragePooling1D()(conv_a)\n",
    "    \n",
    "    conv_b = conv(input2)\n",
    "    conv_b = GlobalAveragePooling1D()(conv_b)\n",
    "    return conv_a, conv_b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\util\\deprecation.py:497: calling conv1d (from tensorflow.python.ops.nn_ops) with data_format=NHWC is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "`NHWC` for data_format is deprecated, use `NWC` instead\n",
      "Train on 228946 samples, validate on 25440 samples\n",
      "Epoch 1/50\n",
      "228946/228946 [==============================] - 138s 603us/step - loss: 0.5012 - acc: 0.7589 - val_loss: 0.3626 - val_acc: 0.8330\n",
      "Epoch 2/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.3604 - acc: 0.8375 - val_loss: 0.3043 - val_acc: 0.8669\n",
      "Epoch 3/50\n",
      "228946/228946 [==============================] - 134s 583us/step - loss: 0.3070 - acc: 0.8653 - val_loss: 0.2745 - val_acc: 0.8809\n",
      "Epoch 4/50\n",
      "228946/228946 [==============================] - 134s 584us/step - loss: 0.2773 - acc: 0.8797 - val_loss: 0.2541 - val_acc: 0.8888\n",
      "Epoch 5/50\n",
      "228946/228946 [==============================] - 134s 585us/step - loss: 0.2579 - acc: 0.8887 - val_loss: 0.2406 - val_acc: 0.8991\n",
      "Epoch 6/50\n",
      "228946/228946 [==============================] - 135s 589us/step - loss: 0.2422 - acc: 0.8959 - val_loss: 0.2224 - val_acc: 0.9072\n",
      "Epoch 7/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.2294 - acc: 0.9017 - val_loss: 0.2164 - val_acc: 0.9086\n",
      "Epoch 8/50\n",
      "228946/228946 [==============================] - 138s 604us/step - loss: 0.2193 - acc: 0.9071 - val_loss: 0.2128 - val_acc: 0.9122\n",
      "Epoch 9/50\n",
      "228946/228946 [==============================] - 138s 603us/step - loss: 0.2107 - acc: 0.9110 - val_loss: 0.2056 - val_acc: 0.9149\n",
      "Epoch 10/50\n",
      "228946/228946 [==============================] - 138s 603us/step - loss: 0.2035 - acc: 0.9139 - val_loss: 0.2085 - val_acc: 0.9125\n",
      "Epoch 11/50\n",
      "228946/228946 [==============================] - 138s 602us/step - loss: 0.1997 - acc: 0.9166 - val_loss: 0.1992 - val_acc: 0.9185\n",
      "Epoch 12/50\n",
      "228946/228946 [==============================] - 138s 603us/step - loss: 0.1937 - acc: 0.9186 - val_loss: 0.1964 - val_acc: 0.9196\n",
      "Epoch 13/50\n",
      "228946/228946 [==============================] - 138s 602us/step - loss: 0.1885 - acc: 0.9208 - val_loss: 0.1946 - val_acc: 0.9215\n",
      "Epoch 14/50\n",
      "228946/228946 [==============================] - 137s 601us/step - loss: 0.1835 - acc: 0.9228 - val_loss: 0.1958 - val_acc: 0.9214\n",
      "Epoch 15/50\n",
      "228946/228946 [==============================] - 138s 602us/step - loss: 0.1789 - acc: 0.9251 - val_loss: 0.1873 - val_acc: 0.9233\n",
      "Epoch 16/50\n",
      "228946/228946 [==============================] - 139s 607us/step - loss: 0.1772 - acc: 0.9260 - val_loss: 0.1921 - val_acc: 0.9239\n",
      "Epoch 17/50\n",
      "228946/228946 [==============================] - 138s 602us/step - loss: 0.1742 - acc: 0.9271 - val_loss: 0.1914 - val_acc: 0.9249\n",
      "Epoch 18/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.1714 - acc: 0.9285 - val_loss: 0.1911 - val_acc: 0.9225\n",
      "Epoch 19/50\n",
      "228946/228946 [==============================] - 138s 601us/step - loss: 0.1665 - acc: 0.9307 - val_loss: 0.1912 - val_acc: 0.9245\n",
      "Epoch 20/50\n",
      "228946/228946 [==============================] - 137s 599us/step - loss: 0.1660 - acc: 0.9306 - val_loss: 0.1851 - val_acc: 0.9258\n",
      "Epoch 21/50\n",
      "228946/228946 [==============================] - 137s 597us/step - loss: 0.1638 - acc: 0.9314 - val_loss: 0.1887 - val_acc: 0.9250\n",
      "Epoch 22/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.1613 - acc: 0.9334 - val_loss: 0.1894 - val_acc: 0.9231\n",
      "Epoch 23/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.1573 - acc: 0.9348 - val_loss: 0.1957 - val_acc: 0.9244\n",
      "Epoch 24/50\n",
      "228946/228946 [==============================] - 137s 599us/step - loss: 0.1566 - acc: 0.9345 - val_loss: 0.1905 - val_acc: 0.9255\n",
      "Epoch 25/50\n",
      "228946/228946 [==============================] - 137s 601us/step - loss: 0.1561 - acc: 0.9353 - val_loss: 0.1909 - val_acc: 0.9265\n",
      "Epoch 26/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.1533 - acc: 0.9362 - val_loss: 0.1870 - val_acc: 0.9270\n",
      "Epoch 27/50\n",
      "228946/228946 [==============================] - 137s 599us/step - loss: 0.1531 - acc: 0.9366 - val_loss: 0.1909 - val_acc: 0.9239\n",
      "Epoch 28/50\n",
      "228946/228946 [==============================] - 136s 595us/step - loss: 0.1497 - acc: 0.9378 - val_loss: 0.1781 - val_acc: 0.9280\n",
      "Epoch 29/50\n",
      "228946/228946 [==============================] - 137s 598us/step - loss: 0.1486 - acc: 0.9384 - val_loss: 0.1889 - val_acc: 0.9259\n",
      "Epoch 30/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.1468 - acc: 0.9387 - val_loss: 0.1835 - val_acc: 0.9286\n",
      "Epoch 31/50\n",
      "228946/228946 [==============================] - 137s 600us/step - loss: 0.1456 - acc: 0.9403 - val_loss: 0.1904 - val_acc: 0.9266\n",
      "Epoch 32/50\n",
      "228946/228946 [==============================] - 138s 601us/step - loss: 0.1453 - acc: 0.9401 - val_loss: 0.1865 - val_acc: 0.9270\n",
      "Epoch 33/50\n",
      "228946/228946 [==============================] - 138s 602us/step - loss: 0.1429 - acc: 0.9409 - val_loss: 0.1902 - val_acc: 0.9260\n",
      "Epoch 34/50\n",
      "228946/228946 [==============================] - 138s 604us/step - loss: 0.1427 - acc: 0.9408 - val_loss: 0.1916 - val_acc: 0.9276\n",
      "Epoch 35/50\n",
      "228946/228946 [==============================] - 139s 606us/step - loss: 0.1399 - acc: 0.9423 - val_loss: 0.1949 - val_acc: 0.9274\n",
      "Epoch 36/50\n",
      "228946/228946 [==============================] - 138s 604us/step - loss: 0.1390 - acc: 0.9422 - val_loss: 0.1905 - val_acc: 0.9305\n",
      "Epoch 37/50\n",
      "228946/228946 [==============================] - 139s 606us/step - loss: 0.1382 - acc: 0.9434 - val_loss: 0.1936 - val_acc: 0.9286\n",
      "Epoch 38/50\n",
      "228946/228946 [==============================] - 138s 604us/step - loss: 0.1370 - acc: 0.9435 - val_loss: 0.1983 - val_acc: 0.9273\n",
      "load model ./log/20180706-205508.multi_lstm_cnn_char.028.hdf5\n",
      "Train on 228946 samples, validate on 25440 samples\n",
      "Epoch 1/50\n",
      "228946/228946 [==============================] - 138s 604us/step - loss: 0.4998 - acc: 0.7611 - val_loss: 0.3598 - val_acc: 0.8404\n",
      "Epoch 2/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.3596 - acc: 0.8381 - val_loss: 0.2962 - val_acc: 0.8692\n",
      "Epoch 3/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.3067 - acc: 0.8650 - val_loss: 0.2641 - val_acc: 0.8835\n",
      "Epoch 4/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.2795 - acc: 0.8788 - val_loss: 0.2479 - val_acc: 0.8947\n",
      "Epoch 5/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.2586 - acc: 0.8879 - val_loss: 0.2425 - val_acc: 0.8936\n",
      "Epoch 6/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.2416 - acc: 0.8965 - val_loss: 0.2306 - val_acc: 0.9009\n",
      "Epoch 7/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.2313 - acc: 0.9013 - val_loss: 0.2201 - val_acc: 0.9098\n",
      "Epoch 8/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.2210 - acc: 0.9065 - val_loss: 0.2181 - val_acc: 0.9080\n",
      "Epoch 9/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.2126 - acc: 0.9095 - val_loss: 0.2045 - val_acc: 0.9137\n",
      "Epoch 10/50\n",
      "228946/228946 [==============================] - 133s 581us/step - loss: 0.2060 - acc: 0.9128 - val_loss: 0.1990 - val_acc: 0.9171\n",
      "Epoch 11/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.1994 - acc: 0.9157 - val_loss: 0.2006 - val_acc: 0.9168\n",
      "Epoch 12/50\n",
      "228946/228946 [==============================] - 133s 583us/step - loss: 0.1936 - acc: 0.9184 - val_loss: 0.1981 - val_acc: 0.9188\n",
      "Epoch 13/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1884 - acc: 0.9213 - val_loss: 0.1962 - val_acc: 0.9199\n",
      "Epoch 14/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1841 - acc: 0.9226 - val_loss: 0.1888 - val_acc: 0.9224\n",
      "Epoch 15/50\n",
      "228946/228946 [==============================] - 134s 584us/step - loss: 0.1817 - acc: 0.9241 - val_loss: 0.2062 - val_acc: 0.9181\n",
      "Epoch 16/50\n",
      "228946/228946 [==============================] - 136s 593us/step - loss: 0.1777 - acc: 0.9251 - val_loss: 0.1865 - val_acc: 0.9249\n",
      "Epoch 17/50\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "228946/228946 [==============================] - 136s 596us/step - loss: 0.1740 - acc: 0.9269 - val_loss: 0.1905 - val_acc: 0.9227\n",
      "Epoch 18/50\n",
      "228946/228946 [==============================] - 136s 595us/step - loss: 0.1704 - acc: 0.9282 - val_loss: 0.1891 - val_acc: 0.9254\n",
      "Epoch 19/50\n",
      "228946/228946 [==============================] - 137s 597us/step - loss: 0.1689 - acc: 0.9295 - val_loss: 0.1853 - val_acc: 0.9276\n",
      "Epoch 20/50\n",
      "228946/228946 [==============================] - 137s 599us/step - loss: 0.1660 - acc: 0.9310 - val_loss: 0.1856 - val_acc: 0.9266\n",
      "Epoch 21/50\n",
      "228946/228946 [==============================] - 137s 597us/step - loss: 0.1643 - acc: 0.9314 - val_loss: 0.1961 - val_acc: 0.9230\n",
      "Epoch 22/50\n",
      "228946/228946 [==============================] - 137s 598us/step - loss: 0.1618 - acc: 0.9325 - val_loss: 0.1824 - val_acc: 0.9283\n",
      "Epoch 23/50\n",
      "228946/228946 [==============================] - 133s 581us/step - loss: 0.1593 - acc: 0.9336 - val_loss: 0.1835 - val_acc: 0.9281\n",
      "Epoch 24/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1576 - acc: 0.9343 - val_loss: 0.1837 - val_acc: 0.9284\n",
      "Epoch 25/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1555 - acc: 0.9354 - val_loss: 0.1829 - val_acc: 0.9290\n",
      "Epoch 26/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1540 - acc: 0.9355 - val_loss: 0.1799 - val_acc: 0.9315\n",
      "Epoch 27/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1519 - acc: 0.9370 - val_loss: 0.1909 - val_acc: 0.9283\n",
      "Epoch 28/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1497 - acc: 0.9381 - val_loss: 0.1966 - val_acc: 0.9262\n",
      "Epoch 29/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1474 - acc: 0.9385 - val_loss: 0.1849 - val_acc: 0.9278\n",
      "Epoch 30/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1479 - acc: 0.9389 - val_loss: 0.1898 - val_acc: 0.9290\n",
      "Epoch 31/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1466 - acc: 0.9389 - val_loss: 0.1870 - val_acc: 0.9269\n",
      "Epoch 32/50\n",
      "228946/228946 [==============================] - 133s 582us/step - loss: 0.1449 - acc: 0.9402 - val_loss: 0.1903 - val_acc: 0.9292\n",
      "Epoch 33/50\n",
      "228946/228946 [==============================] - 134s 583us/step - loss: 0.1440 - acc: 0.9403 - val_loss: 0.1910 - val_acc: 0.9276\n",
      "Epoch 34/50\n",
      "228946/228946 [==============================] - 136s 595us/step - loss: 0.1418 - acc: 0.9413 - val_loss: 0.1959 - val_acc: 0.9279\n",
      "Epoch 35/50\n",
      "228946/228946 [==============================] - 137s 597us/step - loss: 0.1417 - acc: 0.9411 - val_loss: 0.1912 - val_acc: 0.9280\n",
      "Epoch 36/50\n",
      "228946/228946 [==============================] - 137s 597us/step - loss: 0.1413 - acc: 0.9416 - val_loss: 0.1877 - val_acc: 0.9285\n",
      "load model ./log/20180706-222330.multi_lstm_cnn_char.026.hdf5\n",
      "Train on 228947 samples, validate on 25439 samples\n",
      "Epoch 1/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.5003 - acc: 0.7604 - val_loss: 0.3939 - val_acc: 0.8276\n",
      "Epoch 2/50\n",
      "228947/228947 [==============================] - 133s 582us/step - loss: 0.3591 - acc: 0.8390 - val_loss: 0.3104 - val_acc: 0.8641\n",
      "Epoch 3/50\n",
      "228947/228947 [==============================] - 133s 582us/step - loss: 0.3080 - acc: 0.8645 - val_loss: 0.2620 - val_acc: 0.8866\n",
      "Epoch 4/50\n",
      "228947/228947 [==============================] - 133s 582us/step - loss: 0.2778 - acc: 0.8794 - val_loss: 0.2573 - val_acc: 0.8896\n",
      "Epoch 5/50\n",
      "228947/228947 [==============================] - 133s 583us/step - loss: 0.2577 - acc: 0.8891 - val_loss: 0.2334 - val_acc: 0.9018\n",
      "Epoch 6/50\n",
      "228947/228947 [==============================] - 134s 583us/step - loss: 0.2424 - acc: 0.8968 - val_loss: 0.2236 - val_acc: 0.9072\n",
      "Epoch 7/50\n",
      "228947/228947 [==============================] - 133s 583us/step - loss: 0.2297 - acc: 0.9023 - val_loss: 0.2206 - val_acc: 0.9064\n",
      "Epoch 8/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.2198 - acc: 0.9071 - val_loss: 0.2113 - val_acc: 0.9116\n",
      "Epoch 9/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.2117 - acc: 0.9105 - val_loss: 0.2092 - val_acc: 0.9127\n",
      "Epoch 10/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.2049 - acc: 0.9140 - val_loss: 0.2033 - val_acc: 0.9168\n",
      "Epoch 11/50\n",
      "228947/228947 [==============================] - 135s 590us/step - loss: 0.1993 - acc: 0.9168 - val_loss: 0.1971 - val_acc: 0.9186\n",
      "Epoch 12/50\n",
      "228947/228947 [==============================] - 136s 593us/step - loss: 0.1927 - acc: 0.9188 - val_loss: 0.1953 - val_acc: 0.9182\n",
      "Epoch 13/50\n",
      "228947/228947 [==============================] - 136s 593us/step - loss: 0.1880 - acc: 0.9211 - val_loss: 0.1932 - val_acc: 0.9217\n",
      "Epoch 14/50\n",
      "228947/228947 [==============================] - 136s 592us/step - loss: 0.1855 - acc: 0.9225 - val_loss: 0.1907 - val_acc: 0.9213\n",
      "Epoch 15/50\n",
      "228947/228947 [==============================] - 136s 593us/step - loss: 0.1804 - acc: 0.9247 - val_loss: 0.1885 - val_acc: 0.9242\n",
      "Epoch 16/50\n",
      "228947/228947 [==============================] - 136s 592us/step - loss: 0.1777 - acc: 0.9256 - val_loss: 0.1889 - val_acc: 0.9229\n",
      "Epoch 17/50\n",
      "228947/228947 [==============================] - 136s 592us/step - loss: 0.1746 - acc: 0.9273 - val_loss: 0.1869 - val_acc: 0.9251\n",
      "Epoch 18/50\n",
      "228947/228947 [==============================] - 133s 581us/step - loss: 0.1727 - acc: 0.9280 - val_loss: 0.1835 - val_acc: 0.9235\n",
      "Epoch 19/50\n",
      "228947/228947 [==============================] - 133s 582us/step - loss: 0.1689 - acc: 0.9300 - val_loss: 0.1919 - val_acc: 0.9218\n",
      "Epoch 20/50\n",
      "228947/228947 [==============================] - 133s 581us/step - loss: 0.1672 - acc: 0.9305 - val_loss: 0.1925 - val_acc: 0.9235\n",
      "Epoch 21/50\n",
      "228947/228947 [==============================] - 133s 581us/step - loss: 0.1650 - acc: 0.9313 - val_loss: 0.1847 - val_acc: 0.9263\n",
      "Epoch 22/50\n",
      "228947/228947 [==============================] - 133s 582us/step - loss: 0.1621 - acc: 0.9325 - val_loss: 0.1856 - val_acc: 0.9251\n",
      "Epoch 23/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.1596 - acc: 0.9338 - val_loss: 0.1873 - val_acc: 0.9266\n",
      "Epoch 24/50\n",
      "228947/228947 [==============================] - 136s 595us/step - loss: 0.1577 - acc: 0.9343 - val_loss: 0.1843 - val_acc: 0.9279\n",
      "Epoch 25/50\n",
      "228947/228947 [==============================] - 137s 597us/step - loss: 0.1554 - acc: 0.9354 - val_loss: 0.1866 - val_acc: 0.9273\n",
      "Epoch 26/50\n",
      "228947/228947 [==============================] - 137s 597us/step - loss: 0.1542 - acc: 0.9357 - val_loss: 0.1821 - val_acc: 0.9287\n",
      "Epoch 27/50\n",
      "228947/228947 [==============================] - 138s 601us/step - loss: 0.1530 - acc: 0.9370 - val_loss: 0.1877 - val_acc: 0.9273\n",
      "Epoch 28/50\n",
      "228947/228947 [==============================] - 137s 599us/step - loss: 0.1510 - acc: 0.9375 - val_loss: 0.1866 - val_acc: 0.9289\n",
      "Epoch 29/50\n",
      "228947/228947 [==============================] - 137s 597us/step - loss: 0.1492 - acc: 0.9384 - val_loss: 0.1820 - val_acc: 0.9285\n",
      "Epoch 30/50\n",
      "228947/228947 [==============================] - 135s 590us/step - loss: 0.1475 - acc: 0.9389 - val_loss: 0.1841 - val_acc: 0.9297\n",
      "Epoch 31/50\n",
      "228947/228947 [==============================] - 136s 592us/step - loss: 0.1456 - acc: 0.9397 - val_loss: 0.1833 - val_acc: 0.9311\n",
      "Epoch 32/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.1455 - acc: 0.9401 - val_loss: 0.1935 - val_acc: 0.9285\n",
      "Epoch 33/50\n",
      "228947/228947 [==============================] - 137s 599us/step - loss: 0.1433 - acc: 0.9406 - val_loss: 0.1898 - val_acc: 0.9275\n",
      "Epoch 34/50\n",
      "228947/228947 [==============================] - 137s 598us/step - loss: 0.1414 - acc: 0.9409 - val_loss: 0.1842 - val_acc: 0.9303\n",
      "Epoch 35/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.1407 - acc: 0.9418 - val_loss: 0.1867 - val_acc: 0.9297\n",
      "Epoch 36/50\n",
      "228947/228947 [==============================] - 138s 601us/step - loss: 0.1408 - acc: 0.9418 - val_loss: 0.1930 - val_acc: 0.9279\n",
      "Epoch 37/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.1397 - acc: 0.9424 - val_loss: 0.1825 - val_acc: 0.9321\n",
      "Epoch 38/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.1381 - acc: 0.9428 - val_loss: 0.1852 - val_acc: 0.9312\n",
      "Epoch 39/50\n",
      "228947/228947 [==============================] - 137s 598us/step - loss: 0.1379 - acc: 0.9435 - val_loss: 0.1858 - val_acc: 0.9315\n",
      "load model ./log/20180706-234533.multi_lstm_cnn_char.029.hdf5\n",
      "Train on 228947 samples, validate on 25439 samples\n",
      "Epoch 1/50\n",
      "228947/228947 [==============================] - 138s 601us/step - loss: 0.5080 - acc: 0.7551 - val_loss: 0.3998 - val_acc: 0.8163\n",
      "Epoch 2/50\n",
      "228947/228947 [==============================] - 133s 582us/step - loss: 0.3630 - acc: 0.8364 - val_loss: 0.3053 - val_acc: 0.8676\n",
      "Epoch 3/50\n",
      "228947/228947 [==============================] - 136s 593us/step - loss: 0.3084 - acc: 0.8641 - val_loss: 0.2817 - val_acc: 0.8766\n",
      "Epoch 4/50\n",
      "228947/228947 [==============================] - 137s 598us/step - loss: 0.2800 - acc: 0.8776 - val_loss: 0.2417 - val_acc: 0.8962\n",
      "Epoch 5/50\n",
      "228947/228947 [==============================] - 137s 598us/step - loss: 0.2586 - acc: 0.8888 - val_loss: 0.2347 - val_acc: 0.9020\n",
      "Epoch 6/50\n",
      "228947/228947 [==============================] - 138s 601us/step - loss: 0.2433 - acc: 0.8958 - val_loss: 0.2229 - val_acc: 0.9068\n",
      "Epoch 7/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.2320 - acc: 0.9007 - val_loss: 0.2223 - val_acc: 0.9081\n",
      "Epoch 8/50\n",
      "228947/228947 [==============================] - 137s 600us/step - loss: 0.2219 - acc: 0.9053 - val_loss: 0.2088 - val_acc: 0.9134\n",
      "Epoch 9/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.2140 - acc: 0.9092 - val_loss: 0.2035 - val_acc: 0.9157\n",
      "Epoch 10/50\n",
      "228947/228947 [==============================] - 134s 585us/step - loss: 0.2078 - acc: 0.9119 - val_loss: 0.2022 - val_acc: 0.9168\n",
      "Epoch 11/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.2009 - acc: 0.9158 - val_loss: 0.1958 - val_acc: 0.9215\n",
      "Epoch 12/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.1950 - acc: 0.9175 - val_loss: 0.2072 - val_acc: 0.9131\n",
      "Epoch 13/50\n",
      "228947/228947 [==============================] - 134s 585us/step - loss: 0.1899 - acc: 0.9204 - val_loss: 0.1923 - val_acc: 0.9248\n",
      "Epoch 14/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.1862 - acc: 0.9222 - val_loss: 0.1997 - val_acc: 0.9200\n",
      "Epoch 15/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.1818 - acc: 0.9235 - val_loss: 0.1903 - val_acc: 0.9248\n",
      "Epoch 16/50\n",
      "228947/228947 [==============================] - 134s 585us/step - loss: 0.1790 - acc: 0.9249 - val_loss: 0.1846 - val_acc: 0.9271\n",
      "Epoch 17/50\n",
      "228947/228947 [==============================] - 134s 585us/step - loss: 0.1754 - acc: 0.9271 - val_loss: 0.1964 - val_acc: 0.9232\n",
      "Epoch 18/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.1724 - acc: 0.9279 - val_loss: 0.1853 - val_acc: 0.9261\n",
      "Epoch 19/50\n",
      "228947/228947 [==============================] - 134s 584us/step - loss: 0.1694 - acc: 0.9292 - val_loss: 0.1955 - val_acc: 0.9246\n",
      "Epoch 20/50\n",
      "228947/228947 [==============================] - 135s 591us/step - loss: 0.1668 - acc: 0.9305 - val_loss: 0.1919 - val_acc: 0.9263\n",
      "Epoch 21/50\n",
      "228947/228947 [==============================] - 136s 596us/step - loss: 0.1647 - acc: 0.9317 - val_loss: 0.1971 - val_acc: 0.9242\n",
      "Epoch 22/50\n",
      "228947/228947 [==============================] - 138s 601us/step - loss: 0.1629 - acc: 0.9323 - val_loss: 0.1910 - val_acc: 0.9275\n",
      "Epoch 23/50\n",
      "228947/228947 [==============================] - 138s 602us/step - loss: 0.1600 - acc: 0.9334 - val_loss: 0.1901 - val_acc: 0.9278\n",
      "Epoch 24/50\n",
      "228947/228947 [==============================] - 137s 601us/step - loss: 0.1589 - acc: 0.9341 - val_loss: 0.1900 - val_acc: 0.9274\n",
      "Epoch 25/50\n",
      "228947/228947 [==============================] - 139s 605us/step - loss: 0.1563 - acc: 0.9346 - val_loss: 0.1858 - val_acc: 0.9283\n",
      "Epoch 26/50\n",
      "228947/228947 [==============================] - 137s 601us/step - loss: 0.1548 - acc: 0.9354 - val_loss: 0.1890 - val_acc: 0.9272\n",
      "load model ./log/20180707-011500.multi_lstm_cnn_char.016.hdf5\n",
      "Train on 228948 samples, validate on 25438 samples\n",
      "Epoch 1/50\n",
      "228948/228948 [==============================] - 140s 612us/step - loss: 0.4955 - acc: 0.7634 - val_loss: 0.3968 - val_acc: 0.8205\n",
      "Epoch 2/50\n",
      "228948/228948 [==============================] - 133s 582us/step - loss: 0.3544 - acc: 0.8414 - val_loss: 0.3060 - val_acc: 0.8623\n",
      "Epoch 3/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.3048 - acc: 0.8663 - val_loss: 0.2781 - val_acc: 0.8801\n",
      "Epoch 4/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.2762 - acc: 0.8800 - val_loss: 0.2487 - val_acc: 0.8942\n",
      "Epoch 5/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.2552 - acc: 0.8905 - val_loss: 0.2435 - val_acc: 0.8932\n",
      "Epoch 6/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.2410 - acc: 0.8962 - val_loss: 0.2231 - val_acc: 0.9064\n",
      "Epoch 7/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.2285 - acc: 0.9030 - val_loss: 0.2219 - val_acc: 0.9092\n",
      "Epoch 8/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.2193 - acc: 0.9074 - val_loss: 0.2135 - val_acc: 0.9136\n",
      "Epoch 9/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.2114 - acc: 0.9106 - val_loss: 0.2107 - val_acc: 0.9122\n",
      "Epoch 10/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.2048 - acc: 0.9136 - val_loss: 0.2011 - val_acc: 0.9191\n",
      "Epoch 11/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1992 - acc: 0.9158 - val_loss: 0.2013 - val_acc: 0.9176\n",
      "Epoch 12/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1925 - acc: 0.9188 - val_loss: 0.1944 - val_acc: 0.9237\n",
      "Epoch 13/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1898 - acc: 0.9207 - val_loss: 0.2022 - val_acc: 0.9194\n",
      "Epoch 14/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1853 - acc: 0.9226 - val_loss: 0.1959 - val_acc: 0.9223\n",
      "Epoch 15/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1812 - acc: 0.9239 - val_loss: 0.1874 - val_acc: 0.9264\n",
      "Epoch 16/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1775 - acc: 0.9256 - val_loss: 0.1930 - val_acc: 0.9247\n",
      "Epoch 17/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.1746 - acc: 0.9272 - val_loss: 0.1884 - val_acc: 0.9279\n",
      "Epoch 18/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1712 - acc: 0.9286 - val_loss: 0.1961 - val_acc: 0.9252\n",
      "Epoch 19/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1697 - acc: 0.9290 - val_loss: 0.1886 - val_acc: 0.9262\n",
      "Epoch 20/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1665 - acc: 0.9303 - val_loss: 0.1879 - val_acc: 0.9277\n",
      "Epoch 21/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1635 - acc: 0.9324 - val_loss: 0.1892 - val_acc: 0.9255\n",
      "Epoch 22/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1624 - acc: 0.9327 - val_loss: 0.1926 - val_acc: 0.9250\n",
      "Epoch 23/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1603 - acc: 0.9337 - val_loss: 0.1971 - val_acc: 0.9243\n",
      "Epoch 24/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1578 - acc: 0.9348 - val_loss: 0.1947 - val_acc: 0.9269\n",
      "Epoch 25/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1563 - acc: 0.9352 - val_loss: 0.1888 - val_acc: 0.9277\n",
      "load model ./log/20180707-021514.multi_lstm_cnn_char.015.hdf5\n",
      "Train on 228948 samples, validate on 25438 samples\n",
      "Epoch 1/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.5008 - acc: 0.7604 - val_loss: 0.3611 - val_acc: 0.8382\n",
      "Epoch 2/50\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.3593 - acc: 0.8389 - val_loss: 0.3061 - val_acc: 0.8674\n",
      "Epoch 3/50\n",
      "228948/228948 [==============================] - 136s 594us/step - loss: 0.3075 - acc: 0.8651 - val_loss: 0.2661 - val_acc: 0.8857\n",
      "Epoch 4/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2760 - acc: 0.8799 - val_loss: 0.2563 - val_acc: 0.8926\n",
      "Epoch 5/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.2584 - acc: 0.8890 - val_loss: 0.2345 - val_acc: 0.9008\n",
      "Epoch 6/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.2423 - acc: 0.8958 - val_loss: 0.2373 - val_acc: 0.8991\n",
      "Epoch 7/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2297 - acc: 0.9020 - val_loss: 0.2224 - val_acc: 0.9053\n",
      "Epoch 8/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.2194 - acc: 0.9071 - val_loss: 0.2214 - val_acc: 0.9065\n",
      "Epoch 9/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2110 - acc: 0.9114 - val_loss: 0.2108 - val_acc: 0.9111\n",
      "Epoch 10/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.2044 - acc: 0.9144 - val_loss: 0.2110 - val_acc: 0.9134\n",
      "Epoch 11/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1986 - acc: 0.9169 - val_loss: 0.2022 - val_acc: 0.9156\n",
      "Epoch 12/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1927 - acc: 0.9194 - val_loss: 0.1978 - val_acc: 0.9168\n",
      "Epoch 13/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.1884 - acc: 0.9215 - val_loss: 0.1962 - val_acc: 0.9199\n",
      "Epoch 14/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1843 - acc: 0.9231 - val_loss: 0.2018 - val_acc: 0.9194\n",
      "Epoch 15/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1815 - acc: 0.9246 - val_loss: 0.1961 - val_acc: 0.9211\n",
      "Epoch 16/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1772 - acc: 0.9265 - val_loss: 0.1950 - val_acc: 0.9218\n",
      "Epoch 17/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1745 - acc: 0.9270 - val_loss: 0.1875 - val_acc: 0.9253\n",
      "Epoch 18/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.1722 - acc: 0.9285 - val_loss: 0.1884 - val_acc: 0.9241\n",
      "Epoch 19/50\n",
      "228948/228948 [==============================] - 136s 594us/step - loss: 0.1679 - acc: 0.9308 - val_loss: 0.1925 - val_acc: 0.9246\n",
      "Epoch 20/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1652 - acc: 0.9317 - val_loss: 0.1892 - val_acc: 0.9242\n",
      "Epoch 21/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1633 - acc: 0.9319 - val_loss: 0.1945 - val_acc: 0.9242\n",
      "Epoch 22/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1608 - acc: 0.9336 - val_loss: 0.1867 - val_acc: 0.9248\n",
      "Epoch 23/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1586 - acc: 0.9341 - val_loss: 0.1880 - val_acc: 0.9274\n",
      "Epoch 24/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1569 - acc: 0.9350 - val_loss: 0.1852 - val_acc: 0.9276\n",
      "Epoch 25/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1552 - acc: 0.9357 - val_loss: 0.1868 - val_acc: 0.9268\n",
      "Epoch 26/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1537 - acc: 0.9357 - val_loss: 0.1850 - val_acc: 0.9278\n",
      "Epoch 27/50\n",
      "228948/228948 [==============================] - 139s 607us/step - loss: 0.1523 - acc: 0.9369 - val_loss: 0.1870 - val_acc: 0.9259\n",
      "Epoch 28/50\n",
      "228948/228948 [==============================] - 139s 609us/step - loss: 0.1510 - acc: 0.9372 - val_loss: 0.1874 - val_acc: 0.9262\n",
      "Epoch 29/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1487 - acc: 0.9385 - val_loss: 0.1818 - val_acc: 0.9288\n",
      "Epoch 30/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.1475 - acc: 0.9387 - val_loss: 0.1944 - val_acc: 0.9266\n",
      "Epoch 31/50\n",
      "228948/228948 [==============================] - 139s 606us/step - loss: 0.1459 - acc: 0.9398 - val_loss: 0.1904 - val_acc: 0.9273\n",
      "Epoch 32/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1445 - acc: 0.9400 - val_loss: 0.1881 - val_acc: 0.9275\n",
      "Epoch 33/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1439 - acc: 0.9406 - val_loss: 0.1888 - val_acc: 0.9282\n",
      "Epoch 34/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1424 - acc: 0.9406 - val_loss: 0.1859 - val_acc: 0.9300\n",
      "Epoch 35/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1421 - acc: 0.9417 - val_loss: 0.1888 - val_acc: 0.9273\n",
      "Epoch 36/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.1398 - acc: 0.9425 - val_loss: 0.1882 - val_acc: 0.9283\n",
      "Epoch 37/50\n",
      "228948/228948 [==============================] - 135s 591us/step - loss: 0.1379 - acc: 0.9428 - val_loss: 0.1832 - val_acc: 0.9289\n",
      "Epoch 38/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1381 - acc: 0.9431 - val_loss: 0.1850 - val_acc: 0.9288\n",
      "Epoch 39/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.1353 - acc: 0.9437 - val_loss: 0.1853 - val_acc: 0.9285\n",
      "load model ./log/20180707-031344.multi_lstm_cnn_char.029.hdf5\n",
      "Train on 228948 samples, validate on 25438 samples\n",
      "Epoch 1/50\n",
      "228948/228948 [==============================] - 139s 606us/step - loss: 0.4983 - acc: 0.7621 - val_loss: 0.3643 - val_acc: 0.8362\n",
      "Epoch 2/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.3561 - acc: 0.8407 - val_loss: 0.3013 - val_acc: 0.8664\n",
      "Epoch 3/50\n",
      "228948/228948 [==============================] - 135s 588us/step - loss: 0.3051 - acc: 0.8659 - val_loss: 0.2573 - val_acc: 0.8871\n",
      "Epoch 4/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.2778 - acc: 0.8788 - val_loss: 0.2411 - val_acc: 0.8981\n",
      "Epoch 5/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2571 - acc: 0.8893 - val_loss: 0.2397 - val_acc: 0.8947\n",
      "Epoch 6/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.2417 - acc: 0.8971 - val_loss: 0.2161 - val_acc: 0.9060\n",
      "Epoch 7/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.2305 - acc: 0.9011 - val_loss: 0.2194 - val_acc: 0.9068\n",
      "Epoch 8/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.2191 - acc: 0.9074 - val_loss: 0.2058 - val_acc: 0.9139\n",
      "Epoch 9/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2128 - acc: 0.9100 - val_loss: 0.1984 - val_acc: 0.9172\n",
      "Epoch 10/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.2053 - acc: 0.9139 - val_loss: 0.1912 - val_acc: 0.9196\n",
      "Epoch 11/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1998 - acc: 0.9161 - val_loss: 0.1955 - val_acc: 0.9200\n",
      "Epoch 12/50\n",
      "228948/228948 [==============================] - 139s 608us/step - loss: 0.1950 - acc: 0.9183 - val_loss: 0.1955 - val_acc: 0.9191\n",
      "Epoch 13/50\n",
      "228948/228948 [==============================] - 139s 606us/step - loss: 0.1905 - acc: 0.9207 - val_loss: 0.1856 - val_acc: 0.9244\n",
      "Epoch 14/50\n",
      "228948/228948 [==============================] - 135s 590us/step - loss: 0.1866 - acc: 0.9219 - val_loss: 0.1886 - val_acc: 0.9236\n",
      "Epoch 15/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.1823 - acc: 0.9240 - val_loss: 0.1824 - val_acc: 0.9255\n",
      "Epoch 16/50\n",
      "228948/228948 [==============================] - 137s 596us/step - loss: 0.1774 - acc: 0.9258 - val_loss: 0.1845 - val_acc: 0.9252\n",
      "Epoch 17/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.1752 - acc: 0.9274 - val_loss: 0.1783 - val_acc: 0.9274\n",
      "Epoch 18/50\n",
      "228948/228948 [==============================] - 137s 596us/step - loss: 0.1721 - acc: 0.9283 - val_loss: 0.1895 - val_acc: 0.9242\n",
      "Epoch 19/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1703 - acc: 0.9286 - val_loss: 0.1828 - val_acc: 0.9253\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 20/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1672 - acc: 0.9308 - val_loss: 0.1779 - val_acc: 0.9288\n",
      "Epoch 21/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1656 - acc: 0.9310 - val_loss: 0.1784 - val_acc: 0.9296\n",
      "Epoch 22/50\n",
      "228948/228948 [==============================] - 139s 609us/step - loss: 0.1628 - acc: 0.9315 - val_loss: 0.1902 - val_acc: 0.9268\n",
      "Epoch 23/50\n",
      "228948/228948 [==============================] - 139s 608us/step - loss: 0.1614 - acc: 0.9329 - val_loss: 0.1770 - val_acc: 0.9303\n",
      "Epoch 24/50\n",
      "228948/228948 [==============================] - 136s 594us/step - loss: 0.1586 - acc: 0.9336 - val_loss: 0.1793 - val_acc: 0.9304\n",
      "Epoch 25/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.1574 - acc: 0.9349 - val_loss: 0.1851 - val_acc: 0.9276\n",
      "Epoch 26/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.1549 - acc: 0.9357 - val_loss: 0.1773 - val_acc: 0.9291\n",
      "Epoch 27/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.1549 - acc: 0.9357 - val_loss: 0.1788 - val_acc: 0.9298\n",
      "Epoch 28/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.1525 - acc: 0.9367 - val_loss: 0.1816 - val_acc: 0.9309\n",
      "Epoch 29/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.1507 - acc: 0.9378 - val_loss: 0.1781 - val_acc: 0.9329\n",
      "Epoch 30/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.1490 - acc: 0.9388 - val_loss: 0.1797 - val_acc: 0.9309\n",
      "Epoch 31/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1470 - acc: 0.9392 - val_loss: 0.1828 - val_acc: 0.9304\n",
      "Epoch 32/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1464 - acc: 0.9389 - val_loss: 0.1795 - val_acc: 0.9314\n",
      "Epoch 33/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1450 - acc: 0.9397 - val_loss: 0.1842 - val_acc: 0.9274\n",
      "load model ./log/20180707-044434.multi_lstm_cnn_char.023.hdf5\n",
      "Train on 228948 samples, validate on 25438 samples\n",
      "Epoch 1/50\n",
      "228948/228948 [==============================] - 139s 608us/step - loss: 0.5012 - acc: 0.7600 - val_loss: 0.3757 - val_acc: 0.8280\n",
      "Epoch 2/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.3588 - acc: 0.8387 - val_loss: 0.2968 - val_acc: 0.8718\n",
      "Epoch 3/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.3092 - acc: 0.8641 - val_loss: 0.2757 - val_acc: 0.8807\n",
      "Epoch 4/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2799 - acc: 0.8789 - val_loss: 0.2530 - val_acc: 0.8900\n",
      "Epoch 5/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2579 - acc: 0.8891 - val_loss: 0.2437 - val_acc: 0.8964\n",
      "Epoch 6/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2430 - acc: 0.8962 - val_loss: 0.2326 - val_acc: 0.9039\n",
      "Epoch 7/50\n",
      "228948/228948 [==============================] - 136s 592us/step - loss: 0.2308 - acc: 0.9013 - val_loss: 0.2228 - val_acc: 0.9082\n",
      "Epoch 8/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2200 - acc: 0.9063 - val_loss: 0.2182 - val_acc: 0.9103\n",
      "Epoch 9/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2132 - acc: 0.9097 - val_loss: 0.2100 - val_acc: 0.9155\n",
      "Epoch 10/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2054 - acc: 0.9138 - val_loss: 0.2095 - val_acc: 0.9152\n",
      "Epoch 11/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.1987 - acc: 0.9162 - val_loss: 0.2125 - val_acc: 0.9157\n",
      "Epoch 12/50\n",
      "228948/228948 [==============================] - 134s 585us/step - loss: 0.1943 - acc: 0.9185 - val_loss: 0.2045 - val_acc: 0.9194\n",
      "Epoch 13/50\n",
      "228948/228948 [==============================] - 134s 585us/step - loss: 0.1884 - acc: 0.9211 - val_loss: 0.1942 - val_acc: 0.9228\n",
      "Epoch 14/50\n",
      "228948/228948 [==============================] - 134s 585us/step - loss: 0.1849 - acc: 0.9223 - val_loss: 0.2038 - val_acc: 0.9216\n",
      "Epoch 15/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.1810 - acc: 0.9242 - val_loss: 0.1961 - val_acc: 0.9236\n",
      "Epoch 16/50\n",
      "228948/228948 [==============================] - 134s 585us/step - loss: 0.1782 - acc: 0.9253 - val_loss: 0.1951 - val_acc: 0.9243\n",
      "Epoch 17/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1756 - acc: 0.9266 - val_loss: 0.1877 - val_acc: 0.9270\n",
      "Epoch 18/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.1719 - acc: 0.9278 - val_loss: 0.1922 - val_acc: 0.9259\n",
      "Epoch 19/50\n",
      "228948/228948 [==============================] - 135s 588us/step - loss: 0.1689 - acc: 0.9295 - val_loss: 0.1908 - val_acc: 0.9266\n",
      "Epoch 20/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.1663 - acc: 0.9306 - val_loss: 0.1904 - val_acc: 0.9264\n",
      "Epoch 21/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.1646 - acc: 0.9316 - val_loss: 0.1892 - val_acc: 0.9288\n",
      "Epoch 22/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.1608 - acc: 0.9327 - val_loss: 0.1854 - val_acc: 0.9274\n",
      "Epoch 23/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1594 - acc: 0.9337 - val_loss: 0.1895 - val_acc: 0.9286\n",
      "Epoch 24/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1574 - acc: 0.9344 - val_loss: 0.1905 - val_acc: 0.9270\n",
      "Epoch 25/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.1546 - acc: 0.9358 - val_loss: 0.1959 - val_acc: 0.9281\n",
      "Epoch 26/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.1542 - acc: 0.9358 - val_loss: 0.1916 - val_acc: 0.9274\n",
      "Epoch 27/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1518 - acc: 0.9376 - val_loss: 0.1897 - val_acc: 0.9287\n",
      "Epoch 28/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1508 - acc: 0.9372 - val_loss: 0.1895 - val_acc: 0.9283\n",
      "Epoch 29/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1492 - acc: 0.9380 - val_loss: 0.2004 - val_acc: 0.9291\n",
      "Epoch 30/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.1464 - acc: 0.9394 - val_loss: 0.1983 - val_acc: 0.9286\n",
      "Epoch 31/50\n",
      "228948/228948 [==============================] - 139s 608us/step - loss: 0.1464 - acc: 0.9399 - val_loss: 0.1945 - val_acc: 0.9282\n",
      "Epoch 32/50\n",
      "228948/228948 [==============================] - 140s 610us/step - loss: 0.1449 - acc: 0.9402 - val_loss: 0.1976 - val_acc: 0.9264\n",
      "load model ./log/20180707-060131.multi_lstm_cnn_char.022.hdf5\n",
      "Train on 228948 samples, validate on 25438 samples\n",
      "Epoch 1/50\n",
      "228948/228948 [==============================] - 141s 614us/step - loss: 0.5046 - acc: 0.7564 - val_loss: 0.3719 - val_acc: 0.8311\n",
      "Epoch 2/50\n",
      "228948/228948 [==============================] - 134s 585us/step - loss: 0.3603 - acc: 0.8390 - val_loss: 0.3121 - val_acc: 0.8621\n",
      "Epoch 3/50\n",
      "228948/228948 [==============================] - 134s 585us/step - loss: 0.3097 - acc: 0.8636 - val_loss: 0.2706 - val_acc: 0.8854\n",
      "Epoch 4/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.2783 - acc: 0.8787 - val_loss: 0.2495 - val_acc: 0.8941\n",
      "Epoch 5/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.2576 - acc: 0.8892 - val_loss: 0.2344 - val_acc: 0.9004\n",
      "Epoch 6/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.2423 - acc: 0.8959 - val_loss: 0.2302 - val_acc: 0.9033\n",
      "Epoch 7/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.2295 - acc: 0.9023 - val_loss: 0.2160 - val_acc: 0.9097\n",
      "Epoch 8/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.2211 - acc: 0.9056 - val_loss: 0.2094 - val_acc: 0.9126\n",
      "Epoch 9/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.2122 - acc: 0.9102 - val_loss: 0.2061 - val_acc: 0.9161\n",
      "Epoch 10/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.2053 - acc: 0.9135 - val_loss: 0.2036 - val_acc: 0.9154\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 11/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1988 - acc: 0.9160 - val_loss: 0.2105 - val_acc: 0.9152\n",
      "Epoch 12/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1934 - acc: 0.9184 - val_loss: 0.1998 - val_acc: 0.9182\n",
      "Epoch 13/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.1893 - acc: 0.9208 - val_loss: 0.1984 - val_acc: 0.9204\n",
      "Epoch 14/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1847 - acc: 0.9227 - val_loss: 0.2102 - val_acc: 0.9153\n",
      "Epoch 15/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1818 - acc: 0.9245 - val_loss: 0.1905 - val_acc: 0.9233\n",
      "Epoch 16/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1774 - acc: 0.9255 - val_loss: 0.1955 - val_acc: 0.9228\n",
      "Epoch 17/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.1751 - acc: 0.9267 - val_loss: 0.1951 - val_acc: 0.9234\n",
      "Epoch 18/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.1719 - acc: 0.9282 - val_loss: 0.1925 - val_acc: 0.9215\n",
      "Epoch 19/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1695 - acc: 0.9287 - val_loss: 0.1927 - val_acc: 0.9252\n",
      "Epoch 20/50\n",
      "228948/228948 [==============================] - 134s 586us/step - loss: 0.1657 - acc: 0.9311 - val_loss: 0.1984 - val_acc: 0.9214\n",
      "Epoch 21/50\n",
      "228948/228948 [==============================] - 134s 587us/step - loss: 0.1644 - acc: 0.9317 - val_loss: 0.1980 - val_acc: 0.9249\n",
      "Epoch 22/50\n",
      "228948/228948 [==============================] - 136s 595us/step - loss: 0.1623 - acc: 0.9319 - val_loss: 0.1914 - val_acc: 0.9244\n",
      "Epoch 23/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1602 - acc: 0.9333 - val_loss: 0.1955 - val_acc: 0.9233\n",
      "Epoch 24/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1585 - acc: 0.9333 - val_loss: 0.1928 - val_acc: 0.9236\n",
      "Epoch 25/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1556 - acc: 0.9351 - val_loss: 0.1935 - val_acc: 0.9257\n",
      "load model ./log/20180707-071534.multi_lstm_cnn_char.015.hdf5\n",
      "Train on 228948 samples, validate on 25438 samples\n",
      "Epoch 1/50\n",
      "228948/228948 [==============================] - 140s 611us/step - loss: 0.5002 - acc: 0.7594 - val_loss: 0.3864 - val_acc: 0.8224\n",
      "Epoch 2/50\n",
      "228948/228948 [==============================] - 136s 592us/step - loss: 0.3623 - acc: 0.8367 - val_loss: 0.2982 - val_acc: 0.8691\n",
      "Epoch 3/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.3076 - acc: 0.8647 - val_loss: 0.2731 - val_acc: 0.8803\n",
      "Epoch 4/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.2768 - acc: 0.8802 - val_loss: 0.2399 - val_acc: 0.8989\n",
      "Epoch 5/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.2565 - acc: 0.8899 - val_loss: 0.2285 - val_acc: 0.9048\n",
      "Epoch 6/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2407 - acc: 0.8977 - val_loss: 0.2233 - val_acc: 0.9079\n",
      "Epoch 7/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.2288 - acc: 0.9027 - val_loss: 0.2185 - val_acc: 0.9079\n",
      "Epoch 8/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.2195 - acc: 0.9078 - val_loss: 0.2028 - val_acc: 0.9159\n",
      "Epoch 9/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2103 - acc: 0.9113 - val_loss: 0.2101 - val_acc: 0.9114\n",
      "Epoch 10/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.2042 - acc: 0.9141 - val_loss: 0.2019 - val_acc: 0.9151\n",
      "Epoch 11/50\n",
      "228948/228948 [==============================] - 134s 583us/step - loss: 0.1975 - acc: 0.9169 - val_loss: 0.1928 - val_acc: 0.9207\n",
      "Epoch 12/50\n",
      "228948/228948 [==============================] - 133s 583us/step - loss: 0.1927 - acc: 0.9194 - val_loss: 0.1985 - val_acc: 0.9210\n",
      "Epoch 13/50\n",
      "228948/228948 [==============================] - 134s 584us/step - loss: 0.1898 - acc: 0.9202 - val_loss: 0.1927 - val_acc: 0.9202\n",
      "Epoch 14/50\n",
      "228948/228948 [==============================] - 135s 592us/step - loss: 0.1854 - acc: 0.9224 - val_loss: 0.1854 - val_acc: 0.9234\n",
      "Epoch 15/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.1805 - acc: 0.9246 - val_loss: 0.1868 - val_acc: 0.9261\n",
      "Epoch 16/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.1772 - acc: 0.9261 - val_loss: 0.1870 - val_acc: 0.9248\n",
      "Epoch 17/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1740 - acc: 0.9274 - val_loss: 0.1814 - val_acc: 0.9284\n",
      "Epoch 18/50\n",
      "228948/228948 [==============================] - 137s 599us/step - loss: 0.1707 - acc: 0.9289 - val_loss: 0.1823 - val_acc: 0.9272\n",
      "Epoch 19/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1683 - acc: 0.9296 - val_loss: 0.1844 - val_acc: 0.9250\n",
      "Epoch 20/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1666 - acc: 0.9307 - val_loss: 0.1801 - val_acc: 0.9290\n",
      "Epoch 21/50\n",
      "228948/228948 [==============================] - 137s 600us/step - loss: 0.1640 - acc: 0.9315 - val_loss: 0.1775 - val_acc: 0.9301\n",
      "Epoch 22/50\n",
      "228948/228948 [==============================] - 138s 601us/step - loss: 0.1617 - acc: 0.9331 - val_loss: 0.1883 - val_acc: 0.9273\n",
      "Epoch 23/50\n",
      "228948/228948 [==============================] - 138s 603us/step - loss: 0.1588 - acc: 0.9344 - val_loss: 0.1830 - val_acc: 0.9298\n",
      "Epoch 24/50\n",
      "228948/228948 [==============================] - 138s 604us/step - loss: 0.1572 - acc: 0.9346 - val_loss: 0.1818 - val_acc: 0.9307\n",
      "Epoch 25/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.1551 - acc: 0.9357 - val_loss: 0.1846 - val_acc: 0.9292\n",
      "Epoch 26/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1538 - acc: 0.9361 - val_loss: 0.1854 - val_acc: 0.9252\n",
      "Epoch 27/50\n",
      "228948/228948 [==============================] - 138s 602us/step - loss: 0.1511 - acc: 0.9373 - val_loss: 0.1872 - val_acc: 0.9277\n",
      "Epoch 28/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.1515 - acc: 0.9374 - val_loss: 0.1843 - val_acc: 0.9287\n",
      "Epoch 29/50\n",
      "228948/228948 [==============================] - 136s 596us/step - loss: 0.1499 - acc: 0.9382 - val_loss: 0.1849 - val_acc: 0.9307\n",
      "Epoch 30/50\n",
      "228948/228948 [==============================] - 137s 597us/step - loss: 0.1477 - acc: 0.9385 - val_loss: 0.1832 - val_acc: 0.9312\n",
      "Epoch 31/50\n",
      "228948/228948 [==============================] - 137s 598us/step - loss: 0.1455 - acc: 0.9396 - val_loss: 0.1821 - val_acc: 0.9301\n",
      "load model ./log/20180707-081326.multi_lstm_cnn_char.021.hdf5\n"
     ]
    }
   ],
   "source": [
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "best_results = []\n",
    "last_results = []\n",
    "\n",
    "# 10-fold stratified CV over the char-level inputs. Each fold trains a\n",
    "# shared-weight (siamese) 2-layer LSTM + multi-width CNN model, then\n",
    "# predicts on the test set twice: once with the last-epoch weights and\n",
    "# once with the best (lowest val_loss) checkpoint restored from disk.\n",
    "# random_state fixes the fold assignment so re-runs are reproducible.\n",
    "for i, (train_index, dev_index) in enumerate(StratifiedKFold(n_splits=10, shuffle=True, random_state=42).split(X=char1, y=label)):\n",
    "    train_char1, train_char2, train_y = char1[train_index, :], char2[train_index, :], label[train_index]\n",
    "    dev_char1, dev_char2, dev_y = char1[dev_index, :], char2[dev_index, :], label[dev_index]\n",
    "\n",
    "    input1 = Input(shape=(SEQ_LEN,), dtype=\"int32\")\n",
    "    input2 = Input(shape=(SEQ_LEN,), dtype=\"int32\")\n",
    "\n",
    "    # Frozen pre-trained char embeddings. NOTE(review): weights is passed a\n",
    "    # pandas DataFrame — presumably coerced to an array by Keras; confirm it\n",
    "    # is the expected (vocab, dim) matrix aligned with the tokenizer indices.\n",
    "    embedding_layer = Embedding(\n",
    "        input_dim=char_embedding_data.shape[0],\n",
    "        output_dim=char_embedding_data.shape[1],\n",
    "        weights=[char_embedding_data],\n",
    "        input_length=SEQ_LEN,\n",
    "        trainable=False\n",
    "    )\n",
    "\n",
    "    vector1 = embedding_layer(input1)\n",
    "    vector2 = embedding_layer(input2)\n",
    "\n",
    "    # Both questions go through the SAME stacked LSTMs (shared weights),\n",
    "    # so the two branches produce comparable sequence representations.\n",
    "    lstm_layer1 = LSTM(LSTM_SIZE_1, dropout=DROP_RATE, recurrent_dropout=DROP_RATE, return_sequences=True)\n",
    "    first_1 = lstm_layer1(vector1)\n",
    "    first_1 = Dropout(DROP_RATE)(first_1)\n",
    "    first_2 = lstm_layer1(vector2)\n",
    "    first_2 = Dropout(DROP_RATE)(first_2)\n",
    "\n",
    "    lstm_layer2 = LSTM(LSTM_SIZE_2, dropout=DROP_RATE, recurrent_dropout=DROP_RATE, return_sequences=True)\n",
    "    second_1 = lstm_layer2(first_1)\n",
    "    second_2 = lstm_layer2(first_2)\n",
    "\n",
    "    # Shared convolutions with kernel widths 1..6 over the LSTM outputs.\n",
    "    conv1a, conv1b = cnn_layer_1(second_1, second_2, kernel_size=1, filters=CONV_LEN_1)\n",
    "    conv2a, conv2b = cnn_layer_1(second_1, second_2, kernel_size=2, filters=CONV_LEN_2)\n",
    "    conv3a, conv3b = cnn_layer_1(second_1, second_2, kernel_size=3, filters=CONV_LEN_3)\n",
    "    conv4a, conv4b = cnn_layer_1(second_1, second_2, kernel_size=4, filters=CONV_LEN_4)\n",
    "    conv5a, conv5b = cnn_layer_1(second_1, second_2, kernel_size=5, filters=CONV_LEN_5)\n",
    "    conv6a, conv6b = cnn_layer_1(second_1, second_2, kernel_size=6, filters=CONV_LEN_6)\n",
    "\n",
    "    # Symmetric matching features: |a - b| and a * b of the pooled vectors.\n",
    "    merge_a = concatenate([conv1a, conv2a, conv3a, conv4a, conv5a, conv6a])\n",
    "    merge_b = concatenate([conv1b, conv2b, conv3b, conv4b, conv5b, conv6b])\n",
    "    diff = Lambda(lambda x: K.abs(x[0] - x[1]))([merge_a, merge_b])\n",
    "    mult = Lambda(lambda x: x[0] * x[1])([merge_a, merge_b])\n",
    "    merge = concatenate([diff, mult])\n",
    "\n",
    "    x = Dropout(DROP_RATE)(merge)\n",
    "    x = BatchNormalization()(x)\n",
    "\n",
    "    x = Dense(DENSE_SIZE, activation=\"relu\")(x)\n",
    "    x = Dropout(DROP_RATE)(x)\n",
    "    x = BatchNormalization()(x)\n",
    "\n",
    "    pred = Dense(1, activation=\"sigmoid\")(x)\n",
    "\n",
    "    model = Model(inputs=[input1, input2], outputs=pred)\n",
    "    model.compile(\n",
    "        optimizer=\"nadam\",\n",
    "        loss=\"binary_crossentropy\",\n",
    "        metrics=[\"acc\"]\n",
    "    )\n",
    "\n",
    "    # Stop after 10 epochs without val_loss improvement; keep only the best\n",
    "    # checkpoint of this fold (timestamped so folds don't overwrite each other).\n",
    "    early_stopping = EarlyStopping(\"val_loss\", patience=10)\n",
    "    check_point = ModelCheckpoint(\n",
    "        \"./log/%s.multi_lstm_cnn_char.{epoch:03d}.hdf5\" % (datetime.now().strftime(\"%Y%m%d-%H%M%S\")),\n",
    "        monitor=\"val_loss\",\n",
    "        save_best_only=True,\n",
    "    )\n",
    "\n",
    "    fit_res = model.fit(\n",
    "        x=[train_char1, train_char2],\n",
    "        y=train_y,\n",
    "        batch_size=BATCH_SIZE,\n",
    "        epochs=NUM_EPOCHES,\n",
    "        validation_data=([dev_char1, dev_char2], dev_y),\n",
    "        shuffle=True,\n",
    "        callbacks=[early_stopping, check_point]\n",
    "    )\n",
    "\n",
    "    # Predictions from the final-epoch weights.\n",
    "    pred_last = model.predict([test_char1, test_char2], batch_size=BATCH_SIZE)\n",
    "    last_results.append(pd.DataFrame(pred_last, columns=[\"y_pre\"]))\n",
    "\n",
    "    # glob() returns paths in arbitrary OS order, so [-1] on the raw result is\n",
    "    # not guaranteed to be the newest checkpoint; sort first — the timestamped\n",
    "    # file names sort chronologically.\n",
    "    best_path = sorted(glob(\"./log/*.hdf5\"))[-1].replace(\"\\\\\", \"/\")\n",
    "    print(\"load model %s\" % (best_path,))\n",
    "    model.load_weights(best_path)\n",
    "    pred_best = model.predict([test_char1, test_char2], batch_size=BATCH_SIZE)\n",
    "    best_results.append(pd.DataFrame(pred_best, columns=[\"y_pre\"]))\n",
    "\n",
    "# Average the 10 fold predictions and write both submissions under a single\n",
    "# shared timestamp (previously each to_csv re-read the clock, so the paired\n",
    "# _last/_best files could carry different stamps for the same run).\n",
    "run_tag = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
    "pd.DataFrame(pd.concat(last_results, axis=1).mean(axis=1), columns=[\"y_pre\"]).to_csv(\n",
    "    \"./result/%s-multi_lstm_cnn_char_last.csv\" % (run_tag,),\n",
    "    index=False\n",
    ")\n",
    "pd.DataFrame(pd.concat(best_results, axis=1).mean(axis=1), columns=[\"y_pre\"]).to_csv(\n",
    "    \"./result/%s-multi_lstm_cnn_char_best.csv\" % (run_tag,),\n",
    "    index=False\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
