{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.0.0-alpha0\n",
      "sys.version_info(major=3, minor=7, micro=3, releaselevel='final', serial=0)\n",
      "matplotlib 3.0.3\n",
      "numpy 1.16.2\n",
      "pandas 0.24.2\n",
      "sklearn 0.20.3\n",
      "tensorflow 2.0.0-alpha0\n",
      "tensorflow.python.keras.api._v2.keras 2.2.4-tf\n"
     ]
    }
   ],
   "source": [
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import sklearn\n",
    "import pandas as pd\n",
    "import os\n",
    "import sys\n",
    "import time\n",
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow import keras\n",
    "\n",
    "# Print library versions for reproducibility.\n",
    "print(tf.__version__)\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, tf, keras:\n",
    "    print(module.__name__, module.__version__)\n",
    "\n",
    "# Hyperparameter search plan (RandomizedSearchCV):\n",
    "# 1. Wrap the Keras model as an sklearn estimator\n",
    "# 2. Define the parameter distributions\n",
    "# 3. Run the search\n",
    "# 4. Retrieve the best model\n",
    "# (Written as comments rather than a bare triple-quoted string, so the\n",
    "# cell no longer echoes the string as its output value.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'numpy.ndarray'>\n"
     ]
    }
   ],
   "source": [
    "# np is already imported in the setup cell; do not re-import it here.\n",
    "data = np.linspace(0.01, 2, 20)\n",
    "print(type(data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ".. _california_housing_dataset:\n",
      "\n",
      "California Housing dataset\n",
      "--------------------------\n",
      "\n",
      "**Data Set Characteristics:**\n",
      "\n",
      "    :Number of Instances: 20640\n",
      "\n",
      "    :Number of Attributes: 8 numeric, predictive attributes and the target\n",
      "\n",
      "    :Attribute Information:\n",
      "        - MedInc        median income in block\n",
      "        - HouseAge      median house age in block\n",
      "        - AveRooms      average number of rooms\n",
      "        - AveBedrms     average number of bedrooms\n",
      "        - Population    block population\n",
      "        - AveOccup      average house occupancy\n",
      "        - Latitude      house block latitude\n",
      "        - Longitude     house block longitude\n",
      "\n",
      "    :Missing Attribute Values: None\n",
      "\n",
      "This dataset was obtained from the StatLib repository.\n",
      "http://lib.stat.cmu.edu/datasets/\n",
      "\n",
      "The target variable is the median house value for California districts.\n",
      "\n",
      "This dataset was derived from the 1990 U.S. census, using one row per census\n",
      "block group. A block group is the smallest geographical unit for which the U.S.\n",
      "Census Bureau publishes sample data (a block group typically has a population\n",
      "of 600 to 3,000 people).\n",
      "\n",
      "It can be downloaded/loaded using the\n",
      ":func:`sklearn.datasets.fetch_california_housing` function.\n",
      "\n",
      ".. topic:: References\n",
      "\n",
      "    - Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,\n",
      "      Statistics and Probability Letters, 33 (1997) 291-297\n",
      "\n",
      "(20640, 8)\n",
      "(20640,)\n"
     ]
    }
   ],
   "source": [
    "from sklearn.datasets import fetch_california_housing\n",
    "\n",
    "# Load the California housing regression dataset\n",
    "# (20640 samples, 8 numeric features, median house value target).\n",
    "housing = fetch_california_housing()\n",
    "print(housing.DESCR)\n",
    "print(housing.data.shape)\n",
    "print(housing.target.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(11610, 8) (11610,)\n",
      "(3870, 8) (3870,)\n",
      "(5160, 8) (5160,)\n"
     ]
    }
   ],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Split off a held-out test set first, then carve a validation set\n",
    "# out of the remaining training data. Fixed random_state values make\n",
    "# the splits reproducible across runs.\n",
    "x_train_all, x_test, y_train_all, y_test = train_test_split(\n",
    "    housing.data, housing.target, random_state = 7)\n",
    "x_train, x_valid, y_train, y_valid = train_test_split(\n",
    "    x_train_all, y_train_all, random_state = 11)\n",
    "print(x_train.shape, y_train.shape)\n",
    "print(x_valid.shape, y_valid.shape)\n",
    "print(x_test.shape, y_test.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Fit the scaler on the training split only, then apply the same\n",
    "# transform to validation/test sets to avoid information leakage.\n",
    "scaler = StandardScaler()\n",
    "x_train_scaled = scaler.fit_transform(x_train)\n",
    "x_valid_scaled = scaler.transform(x_valid)\n",
    "x_test_scaled = scaler.transform(x_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 11610 samples, validate on 3870 samples\n",
      "Epoch 1/10\n",
      "11610/11610 [==============================] - 1s 83us/sample - loss: 1.4329 - val_loss: 0.7654\n",
      "Epoch 2/10\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.6528 - val_loss: 0.6674\n",
      "Epoch 3/10\n",
      "11610/11610 [==============================] - 1s 53us/sample - loss: 0.6594 - val_loss: 0.7570\n",
      "Epoch 4/10\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 1.1426 - val_loss: 0.7110\n",
      "Epoch 5/10\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.5703 - val_loss: 0.5765\n",
      "Epoch 6/10\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.5263 - val_loss: 0.5544\n",
      "Epoch 7/10\n",
      "11610/11610 [==============================] - 1s 52us/sample - loss: 0.5052 - val_loss: 0.5334\n",
      "Epoch 8/10\n",
      "11610/11610 [==============================] - 1s 65us/sample - loss: 0.4915 - val_loss: 0.5176\n",
      "Epoch 9/10\n",
      "11610/11610 [==============================] - 1s 59us/sample - loss: 0.4800 - val_loss: 0.5063\n",
      "Epoch 10/10\n",
      "11610/11610 [==============================] - 1s 75us/sample - loss: 0.4717 - val_loss: 0.4973\n"
     ]
    }
   ],
   "source": [
    "# RandomizedSearchCV workflow:\n",
    "# 1. Wrap the Keras model as an sklearn estimator\n",
    "# 2. Define the parameter distributions\n",
    "# 3. Run the search\n",
    "\n",
    "# Fix: KerasRegressor was used below but never imported, which raised\n",
    "# NameError on a fresh kernel (Restart & Run All).\n",
    "from tensorflow.keras.wrappers.scikit_learn import KerasRegressor\n",
    "\n",
    "def build_model(hidden_layers = 1,\n",
    "                layer_size = 30,\n",
    "                learning_rate = 3e-3):\n",
    "    \"\"\"Build and compile a dense regression model.\n",
    "\n",
    "    The returned model is what KerasRegressor calls to construct a fresh\n",
    "    estimator, so every argument here becomes a searchable hyperparameter.\n",
    "\n",
    "    Args:\n",
    "        hidden_layers: number of hidden Dense layers (>= 1).\n",
    "        layer_size: units per hidden layer.\n",
    "        learning_rate: SGD learning rate.\n",
    "\n",
    "    Returns:\n",
    "        A compiled tf.keras Sequential model (MSE loss, single output).\n",
    "    \"\"\"\n",
    "    model = keras.models.Sequential()\n",
    "    model.add(keras.layers.Dense(layer_size, activation='relu',\n",
    "                                 input_shape=x_train.shape[1:]))\n",
    "    for _ in range(hidden_layers - 1):\n",
    "        model.add(keras.layers.Dense(layer_size,\n",
    "                                     activation = 'relu'))\n",
    "    model.add(keras.layers.Dense(1))\n",
    "    # Explicit SGD optimizer so the learning rate can be tuned.\n",
    "    optimizer = keras.optimizers.SGD(learning_rate)\n",
    "    model.compile(loss = 'mse', optimizer = optimizer)\n",
    "    return model\n",
    "\n",
    "# Wrap the model-building function as an sklearn-compatible regressor.\n",
    "sklearn_model = KerasRegressor(\n",
    "    build_fn = build_model)\n",
    "\n",
    "# The wrapped estimator can also be trained directly, like a Keras model.\n",
    "callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]\n",
    "history = sklearn_model.fit(x_train_scaled, y_train,\n",
    "                            epochs = 10,\n",
    "                            validation_data = (x_valid_scaled, y_valid),\n",
    "                            callbacks = callbacks)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAecAAAEzCAYAAAALosttAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xl8VfWd//HX997cm30lgQQCBARBEhQkgCsGQUFtta1aa6tVuzhdrHZm2l/tMl1nptPaGTud2qqtdenYWkDbsZWK1oKKKwFBCJvImkDYE7Kv398f5wZCCOQmuck59+b9fDzuI3c599zPN4G8c77ne75fY61FREREvMPndgEiIiJyMoWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHhMj+FsjPmNMeaAMWbDaV43xpifGWO2GWPeNcacH/kyRUREho5wjpwfAxae4fWrgImh253AL/tfloiIyNDVYzhba18Bjpxhk+uAJ6zjTSDDGJMXqQJFRESGmkiccx4F7On0uDz0nIiIiPRB3GB+mDHmTpyubxISEmaMGTNmwD+z3cLumnYy4g0Z8Sby+29vx+eL7nF1XmlDZV07bRZGpfStlt60o6bZcrjRMjLFR9D9ph/nlZ9Ff6kd3hELbYDYaMfWrVsPWWtzwtrYWtvjDSgANpzmtYeAmzs93gLk9bTPs88+2w6WK//rZXvrI28NyL6XL18+IPsdTF5ow85DtXbs1/5if/739/q8j96048CxRltw71/s/S9u6fPnDQQv/CwiQe3wjlhog7Wx0Q6g1IaRudbaiHRrPwt8MjRq+wKg2lq7LwL7jZjigkzW7DpKW7sW+fCqJavL8Rm4/vz8Qfm8nNR4isdm8vyGykH5PBGR3gjnUqrfA28Ak4wx5caYTxtjPmeM+Vxok6XAdmAb8CvgCwNWbR/NLMiitqmVzZXH3C5FutHebnl6dTmXTswhNz1h0D53QWEumytr2HW4btA+U0QkHOGM1r7ZWptnrQ1Ya/OttY9Yax+01j4Yet1aa79orT3LWjvVWls68GX3TnFBJgClO4+6XIl05/X3D7O3upEbZgzOUXOHBYW5ACwr09GziHjLoA4Ic8uojETy0hNYtfMIt11U4HY50sXi1XtIS4jjiikjBvVzR2clUTgyjec3VHLnnLMG9bNFolFLSwvl5eU0NjYO+menp6ezadOmQf/cvkhISCA/P59AINDnfQyJcDbGUFyQxds7DmOtxZjIj9qWvjnW2MLzGyr5aPFoEgL+Qf/8hYW5/OeLWzlwrJHhaYPXpS4SjcrLy0lNTaWgoGDQf4/W1NSQmpo6qJ/ZF9ZaDh8+THl5OePGjevzfqJ7XHovzCzIZP+xJsqPNrhdinTyl3X7aGptH/Qu7Q4Li0Jd2xv3u/L5ItGksbGRYcOG6QDnDIwxDBs2rN+9C0MmnIvHZgFQuutMk53JYFu8eg9nj0jh3Px0Vz5/wvAUxmcns0yjtkXComDuWSS+R0MmnCflppIaH6dBYR6y7UAt7+yu4sYZo137D2+MYUFRLm9uP0xVfbMrNYhI+FJSUtwuYVAMmXD2+wzTx2YqnD1kyepy/D7DddNHulrHgsJcWtstL2064GodIiIdhkw4A8wcm8mW/TVU17e4XcqQ19rWzjNrypk7KYfhqe4OxDp3VDp56Qm6pEokilhr+epXv0pRURFTp07lD3/4AwD79u1jzpw5TJs2jaKiIl599VXa2tq4/fbbj297//33u1x9z4bEaO0OxQXOeefVu49w+eTBvWxHTvbqtkMcqGnihhmj3S4Fn8+woDCX37+9m/rmVpKCQ+q/hUhUeuaZZ1i7di3r1q3j0KFDzJw5kzlz5vC73/2OBQsW8M1vfpO2tjbq6+tZu3YtFRUVbNiwAYCqqiqXq+/ZkPotNG10BnE+w6qdRxXOLltSWk5WcpDLJw93uxQAriwcwWOv
7+TlLQe5aqpWPBXpyff+XMbGvZGddXHKyDS+88HCsLZduXIlN998M36/nxEjRnDZZZexatUqZs6cyac+9SlaWlr40Ic+xLRp0xg/fjzbt2/nS1/6Etdccw1XXnllROseCEOqWzsx6KdoVDqlOzVi201V9c28uHE/100bSTDOG/8EZxVkkZkUUNe2SJSbM2cOr7zyCqNGjeL222/niSeeIDMzk3Xr1lFSUsKDDz7IZz7zGbfL7NGQOnIG53rnx1/fRWNLmyuTXgg8u24vzW3t3OiBLu0OcX4fV0wZwV/XV9Lc2u6ZPxpEvCrcI9yBcumll/LQQw9x2223ceTIEV555RXuu+8+du3aRX5+Pp/97GdpampizZo1XH311QSDQa6//nomTZrELbfc4mrt4Rhy4VxckMWvXt3Bhorq4+egZXAtLi1nSl4aU0amuV3KSRYU5rKotJzX3z9EySRvdLeLSPc+/OEP88Ybb3DeeedhjOHHP/4xubm5PP7449x3330EAgFSUlJ44oknqKio4I477qC9vR2AH/7why5X37OhF85jnUUwVu08qnB2webKY6yvqOY7H5zidimnuHhCNslBP8vK9iucRTyqtrYWcOYouO+++7jvvvtOev22227jtttuO+V9a9asGZT6ImXI9d0NS4lnfHYyqzVTmCuWlJYT8BuumzbK7VJOkRDwM3fycF7cWKm1v0XEVUMunMFZQrJ011Ha9Qt4ULW0tfOntRXMmzyCrOSg2+V0a0FhLodqm1m9S5PViIh7hmg4Z1FV38L7B2vdLmVIWb75AIdqm7mx2J1FLsIxd/Jwgn6fRm2LiKuGZDjPDJ1rXqWpPAfV4tXlZKfEc9nZOW6Xclop8XFcMjGb5zdUYq16VkTEHUMynAuGJZGdEtT1zoPoUG0Tyzcf4CPnjyLO7+1/dgsLc6moaqAswhMsiIiEy9u/JQeIMYbisVms0qCwQfOndypobbfc6NK6zb0xf8oIfAZ1bYuIa4ZkOIMzKGzPkQYqq/u3ILb0zFrLktXlnDc6g4kjUt0up0dZyUFmjcviea3xLCIuGbLh3HHeuVRHzwOubO8xNlfWcEMUHDV3WFiYy3sHajVoUCTKnWn95507d1JUVDSI1YRvyIbzlJFpJAb8Wt95ECwu3UMwzse157q7bnNvXFmYC6hrW0TcMWTDOeD3MW10ho6cB1hTaxv/t24vCwpzSU8KuF1O2EZmJHJefjrL1LUt4in33nsvDzzwwPHH3/3ud/nXf/1X5s2bx/nnn8/UqVP5v//7v17vt7GxkTvuuIOpU6cyffp0li9fDkBZWRmzZs1i2rRpnHvuubz33nvU1dVxzTXXcN5551FUVHR8LelIGnLTd3Y2syCTny/fRm1TKynxQ/pbMWBe2nSAqvqWqOrS7rCgKJcfP7+FvVUNjMxIdLscEW/5671QuT6y+8ydClf9xxk3uemmm/jyl7/MF7/4RQAWLVrEsmXLuPvuu0lLS+PQoUNccMEFXHvttRhjwv7oBx54AGMM69evZ/PmzVx55ZVs3bqVBx98kHvuuYdPfOITNDc309bWxtKlSxk5ciTPPfccANXV1X1v82kM2SNncCYjabfwzm51bQ+UxaV7yEtP4JIJ2W6X0msLQl3bL6hrW8Qzpk+fzoEDB9i7dy/r1q0jMzOT3NxcvvGNb3Duuecyf/58Kioq2L9/f6/2u3LlyuOrVU2ePJmxY8eydetWLrzwQv793/+dH/3oR+zatYvExESmTp3Kiy++yNe+9jVeffVV0tPTI97OIX24OH1MBj7jTEZy6UTvTowRrfYfa+TlrQf5fMlZ+H3h/wXrFWflpDBxeArLyvZz+8Xj3C5HxFt6OMIdSDfeeCNLliyhsrKSm266iSeffJKDBw+yevVqAoEABQUFNDZG5kqcj3/848yePZvnnnuOq6++moceeojLL7+cNWvWsHTpUr71rW8xb948vv3tb0fk8zoM6SPn1IQA5+SlaTKSAfLH
dypot3CDh9Zt7q2FRbm8teMwR+qa3S5FREJuuukmnnrqKZYsWcKNN95IdXU1w4cPJxAIsHz5cnbt2tXrfV566aU8+eSTAGzdupXdu3czadIktm/fzvjx47n77ru57rrrePfdd9m7dy9JSUnccsstfPWrXx2QFa+GdDiDc0nVO7uraGlrd7uUmGKtZXHpHorHZjIuO9ntcvpsQWEu7Rb+trF3XWQiMnAKCwupqalh1KhR5OXl8YlPfILS0lKmTp3KE088weTJk3u9zy984Qu0t7czdepUbrrpJh577DHi4+NZtGgRRUVFTJs2jQ0bNvDJT36S9evXHx8k9r3vfY9vfetbEW/jkO7WBmcyksde38nGvcc4b3SG2+XEjHf2VPH+wTp+dP14t0vpl8KRaYzKSGRZWSUfnRm9PQAisWb9+hOD0bKzs3njjTe63a5j/efuFBQUsGHDBgASEhJ49NFHT9nm3nvv5d577z3puQULFrBgwYK+lB22IX/kXDy2YxEMdW1H0pLV5SQG/FwTRdc2d8cYw8KiXF597xC1Ta1ulyMiQ8SQD+fc9ARGZyVq/d4Iamxp48/r9nJVUW5MXKK2oDCX5rZ2lm8+4HYpItIH69evZ9q0aSfdZs+e7XZZZxT9vzkjoHhsFq++dwhrba+ui5PuLSurpKaxlRs8vG5zb8wYm0l2SpBlZZV88Lzo7gkQGYqmTp3K2rVr3S6jV4b8kTM4550P1Tax63C926XEhCWry8nPTOSCccPcLiUi/D7DFVNyWb75AI0tbW6XI+IqrXPes0h8jxTOnFgEQ+ed+6+iqoGV2w5x/fn5+KLw2ubTWVA4grrmNl7bdsjtUkRck5CQwOHDhxXQZ2Ct5fDhwyQkJPRrP+rWBibkpJCeGKB051FuLNaI3P54ZnU51hKV03WeyUVnZZMaH8eyskrmnTPC7XJEXJGfn095eTkHDx4c9M9ubGzsd+ANloSEBPLz+/c7UOEM+HyG4rGZrNIiGP1irWXJmnIuHD+M0VlJbpcTUcE4H5efM5wXN+6nta2dOL86nWToCQQCjBvnzmx5K1asYPr06a58thv0GyakuCCL7QfrOFzb5HYpUWvVzqPsOlwfc0fNHRYW5nK0voVVWmZURAaYwjlkZkEmAKW6pKrPFpfuISU+jqum5rpdyoC4bFIO8XE+rfEsIgNO4RwyNT+dYJxP82z3UV1TK8+t38c1U/NICsbm2ZKkYBxzzs5hWVmlBsSIyIBSOIfEx/k5Lz9dR8599NcNldQ3t8XMtc2ns7Awl33VjbxbHvn1W0VEOiicO5kxNosNFdU0NOta1t5aXLqHcdnJFI/NdLuUATXvnOHE+QzPq2tbRAaQwrmTmQWZtLRZ1pVXuV1KVNl9uJ63dhzhhhn5MT/DWkZSkAvGD2PZBnVti8jAUTh3MiN01Kfzzr2zZE05xsCHp49yu5RBsaAol+2H6th24PSr3YiI9IfCuZOMpCBnj0jRpTK90N5ueXp1OZdMyGZkRqLb5QyKBVNGYAw8v0Fd2yIyMBTOXRQXZLFm11Ha2tVlGY43th+moqphSM2sNjwtgemjM1i2UeEsIgND4dzFzIJMappa2VJZ43YpUWHJ6nJSE+K4csrQmtJyYVEuGyqOseeIFksRkcgLK5yNMQuNMVuMMduMMfd28/oYY8xyY8w7xph3jTFXR77UwVE81lkEo1RTefboWGMLf92wj2vPG0lCwO92OYNqQaEz0YomJBGRgdBjOBtj/MADwFXAFOBmY8yULpt9C1hkrZ0OfAz4RaQLHSz5mYnkpiVQqvPOPXru3X00trQPqS7tDmOHJTM5N5UXyva7XYqIxKBwjpxnAdustduttc3AU8B1XbaxQFrofjqwN3IlDi5jDMUFmRqxHYYlq8uZMDyF8/LT3S7FFQuLclm16wgHazQfu4hElunpWk1jzA3AQmvtZ0KPbwVmW2vv6rRNHvACkAkkA/Ottau72dedwJ0AOTk5MxYtWhSp
dkTUi7taeHJTM/95WSLDEs/890ttbS0pKSmDVNnA6Esb9tW28/WVDXx0UoCrxwUHqLLeGeyfxZ6adv7ltQZuLwxSMjoQkX3Gwr8nUDu8JBbaALHRjrlz56621haHs22kJkG+GXjMWvufxpgLgd8aY4qste2dN7LWPgw8DDBp0iRbUlISoY+PrOyKap7ctJK4vEmUTDvztbsrVqzAq+0IV1/a8KPnN+P3becr189heJo31lgd7J+FtZZfb17BjpZkvlsyKyL7jIV/T6B2eEkstAFipx3hCqdbuwLofFIxP/RcZ58GFgFYa98AEoDsSBTohsm5qaTEx7FKXdvdamu3PLOmnMvOzvFMMLvBGMPCwlxef/8Q1Q0tbpcjIjEknHBeBUw0xowzxgRxBnw922Wb3cA8AGPMOTjhfDCShQ6mOL+P6WMyNCjsNF597yD7jzVxY4yu29wbVxbm0tJmWb75gNuliEgM6TGcrbWtwF3AMmATzqjsMmPM940x14Y2+2fgs8aYdcDvgdttlE88PLMgiy37a6iu1xFRV4tXl5OZFGDeOUPr2ubuTB+dwfDUeF1SJSIRFdY5Z2vtUmBpl+e+3en+RuDiyJbmruKCTKyFNbuPMnfycLfL8Yzq+hZeLNvPx2ePIRinOWx8PsOVhSN4enUFjS1tQ+56bxEZGPrtehrTRmcQ5zM679zFs+sqaG5r5wZ1aR+3sDCPhpY2XtkatWdyRMRjFM6nkRSMo3BUOqW7dN65s8WryzknL42iUUPz2ubuzB6fRXpiQGs8i0jEuBjO3j8lPXNsJuv2VNHU2uZ2KZ6wpbKGd8urNRCsi4Dfx7xzhvO3jftpaWvv+Q0iIj1wLZyT6/ZA2R/Bw+PGigsyaWptZ0PFMbdL8YQlq/cQ5zNcN22k26V4zsLCXI41tvLWdp0GEZH+c7dbe/Ht8KvLYccrrpZxOjM6FsHQeWda2tr54zsVzDtnOMNS4t0ux3PmnJ1DYsDP82X73C5FRGKAa+FclzwGrvsF1B6Axz8Iv/0I7HvXrXK6lZMaz7jsZFbF6vXOrU2w6tdM3nQ/vP0rOLDptD0ZK7Yc5FBtMzfOGHqLXIQjIeCnZFIOL5Ttp11rgYtIP0Vq+s6+mf4JKLoeVv0KXvkJPHQpTL0R5n4Tssa5WlqH4rGZ/G2T8wvX5zNulxMZLQ2w+nF47b+hZi/D4lJh6QrntaRsKLgExl0KBZdC9tlgDEtW7yE7Jchlk3JcLd3LFhbl8tcNlbyzp4oZYzPdLkdEopi74QwQSICLvgTTb3XC4s1fQtmfoPhTMOerkOJuGMwsyGLx6nK2H6plwvBUV2vpt+Y6KP0NvPYzqDsAYy+BDz/Ia7vaKZk2DnauhB2vws5XYeOfnPckD6cp/yJytgzj09OuIBArf6AMgLmThxPwG5aVVSqcRaRf3A/nDokZMP87MOtOePk/YNWvYe2TTnBf+EWIdycYiwucX7Krdh6N3nBuqnG+n6//D9QfhvElMOcxKAjNG7N7BWQWOLfptzhd20d3HA/r1q3L+de4g7DhEdiZe/KRddZ4MApsgLSEABedlc2yskq+ftVkjL4vItJH3gnnDml58MH/hgvvgpe+Dyt+6JwPvexrMON2iBvc5QnHZSczLDlI6c6j3DxrzKB+dr81VsNbD8ObD0DDUZgwH+b8Pxgz+8zvM8YJ3azxcP4nueGnrzAmaR8PXdJw4sh6wxJn29SRTlh3BHbmuCEd1guLcvn6M+vZXFnDOXlpPb9BRKQb3gvnDtkT4abfQnkp/O278NevOiFz+b9A4UfANzhj2YwxFBdkUrorikZs1x+Btx6ENx+Epmo4+yq47Kswakavd7WhoppNlTV8/LrZMKPA+QPJWji8zQnpHa/C9hWwPrQ2d1p+l7AuiGDDvG/+OSP4hlnP8xsqFc4i0mfeDecO+cVw259h20tOSD/9aefc9PzvwFnzBuUobWZBFsvK9nPgWKO3l0isO+z8AfPWw9BcA+d8
0Dlvn3den3e5ZHU5Qb+Pa8/rtK61Mc4fT9kTnbEB1sKhrSfCetvf4N2nnG3Tx3TqBr8EMqKs96GXclLjmTk2i2VllfzjFWe7XY6IRCnvhzM4YTBxPpx1udOd+vcfwP9eD+PmwPzv9umIsDeKC0LXO+86ytVT8wb0s/qk9oBzPnnVI9BSD4UfhjlfgRGF/dptU2sbf1pbwRWFI0hPCpx+Q2MgZ5Jzm/kZJ6wPbj7RBb71eVj3O2fbjLHOueqOsE6PvdnGFhTl8oO/bGTnoToKspPdLkdEolB0hHMHnw/O/ShMuQ5KH4VXfuxMYjLlQ053d/aEAfnYwpFpJAR8rNp5xFvhfGwfvP4z53vR1gRFNzihnDMpIrv/+6YDVNW39H66TmNg+DnObfad0N4OBzedCOstz8Ha/3W2zRwXOrKe43xNi/7Zx66cMoIf/GUjy8oq+YfLznK7HBGJQtEVzh3i4uGCz8G0j8MbP4fXfw6b/gwzbnMGjqXmRvTjAn4f00ZnUOqVyUiqy2HlT2HNE9DeCud9DC79ZxgW2SBYvLqcEWnxXDqxn5ez+XzOUfyIQufn1t4OB8pCYb0SNj0L7/zW2TbrrNA569CRdZqH/hgK0+isJIpGpSmcRaTPojOcOySkwdxvOF2pL/8YVj8K656CC74AF98NCZFbOWlmQRYPLN9GbVMrKfEufduO7oKV98M7oaPOaR+HS/5xQCZsOXCskZe3HuTOOePxR/raZp8Pcqc6twu/AO1tsH/DibAu+xOsedzZdtiEE0FdcCmkjohsLQNkYWEuP3lhK/uPNTLCy+MURMSTojucO6QMh2t+Ahd8Hpb/G7z6E2eyjUv/2QnuQP9/ORYXZNFuYe3uKi6ZmB2Bonvh8Puw8r+cPzyMz+khuPjLkDFwU2n+8Z0K2trt4KxA5fM7g9byzoOL7nLCuvLdE93g65c4f3iBM2NZ57B2eZKa01kQCucXyiq59cICt8sRkSgTG+HcYdhZcMNv4KK74aXvwQvfdC4pmvsNOPcmJwT66PwxGfgMrNp5ZPDC+dB7zrSm6xeBP+j8oXHxPQN+XtZay+LV5cwYm8n4nJQB/axu+fwwcrpzu/huaGuFfeucoN75qvNHSukjzrY5k4+Htb/VOwtyTBiewvicZJaV7Vc4i0ivxVY4dxg5DW79o3P97YvfgT993hnNPO87cPaCPl1+lZoQYHJuGqt3DcJ55wObnFDe8DQEEp1u+ovuHrQu3XXl1Ww7UMsPPzJ1UD6vR/44yJ/h3C75MrS1wN61J8J67ZOw6lcUJwyHyU8527nMGMPCwlweemU7VfXNZCQN7uQ5IhLd3F0ycqCNL4HPLocbHoXWRvj9TfDoVbD7rT7tbmZBJmt2H6W1rT2iZR5XuR4WfRJ+cYFz+dHF98A978KCfxvUc62LS/eQEPDxgXM9OhjLH4DRM+HSf3L+CPvaLrjlaYy18JsrnbnD2wfoZ9QLCwpzaWu3vLTpgNuliEiUie1wBmfwUdFH4ItvwzX/6Zy//c2V8NQn4OCWXu2quCCL+uY2Nu2riWyNe9+B338cHrwE3l/uTBzy5fVwxfcG/ZxqY0sbz67by1VFeaQmnOHaZi+JC8KE+ZQW/xTOXggv/gv87qNQd8jVss7NTycvPYHnyypdrUNEok/sh3MHf8A5Z3v3OzD3W7D9ZecI9f/uguqKsHZxYhGMCE3luWcVPHkjPFwCu1ZCyTecUL78W5CUFZnP6KUXNu6nprGVGwZjIFiEtQZS4Kb/hat/AjtegV9e7Hx1iTGGBYW5vLL1IPXNra7VISLRZ+iEc4f4FGee6XvWwezPwbt/gP85H174F2dO6jPIS09kVEZi/+fZ3vUG/PbD8Mh8Z+7wed+GL2+Akq85q3O5aHHpHkZlJHLh+GGu1tFnxsCsz8Jn/ub8rB+/Fpb/uzOozAULCnNpam3n5S0HXfl8EYlOQy+cOyQPg4U/hLtKnekuX/8f+Nk05zrilobTvm1mQSar
dh7FWtu7z7PWuTTosQ/Aowud88tX/MA5Ur70n51rtl22t6qBldsOcf2MfHzRvm5z3rlw58vOBC0v/wieuDbsHpJImlmQSVZyUF3bItIrQzecO2SOhQ8/CJ9bCaMvcBbX+Nn5sPrxbo+2iguyOFjTxO4j9eHt31p4/+/OQLTHP+BcHrXgh85Ar4vvdo7uPOKP71RgLdxwfvR1aXcrPsX52X7oQWd094OXwJbnB7WEOL+P+ecM5++bDtDc6v4gNRGJDgrnDrlF8IlFcPtSSB8Ff74bfnmhMy1op6PkmaFFMFb1NJWntbD1Bfj1fKcLu2q3cy70nnXOrFjBpIFsTa9Za1lcuofZ47IYM8xbtfXbtJvhH16GtFHOiP3nvw6tzYP28QuLcqlpauX1990doCYi0UPh3FXBxfDpF52BRQB/uAUeuQJ2vgbAxOEppCXEUXq6QWHWwubnnEFev7vRWTHqAz91BqLN+mxEZisbCKW7jrLzcD03Fg/crGOuyp7onIeedSe8+QvnZ3pk+6B89EVnZZMSH8cydW2LSJgUzt0xxlkL+fNvwAd/5pyrfOxqePJGfAfKKC7IorTrZCTt7c6c0A9eCk99HBqr4dqfw91roPgOZ7EOD1tcuofkoJ+rp0Z20RBPCSTA1fc5f3gd3QEPznGmBh1gCQE/JZNyeHHjftraezlWQUSGJIXzmfjjnHms714D878He96CBy/haw3303hwB0fqmsG2Ob/gf3kRLL7Nmezkww85A83Ov9W5hMvjmlotz727j6un5pEUjM1J405yzgedMQbDz4GnPw3PfgmawxxD0EcLi3I5VNs8ODPMiUjUGwK/iSMgkOhMGznjNlh5PxPf+CUvBZdxcMlKZlW+Dg0VzhzP1z/ijPzuxxzebijd30pdc1vsdml3J2MM3LHUucxq5f2w521nJrkRUwbk40omDScY5+P5DZXMGufONewiEj105NwbiZlwxfdp/mIpz9pLGLnjadp9AbjxcacLfOoNURfMAK9WtDJ2WBIzQ5OsDBn+AMz/Dtz6DNQfhl/NhdWPnTQAMFJS4uO4dEI2y8oqe38ZnogMOQrnPkgYNpan8r7GJ3KWUFp8PxR+yJkmNAq0t1samts4WtfMvuoGSnceYfPAaETXAAAdBUlEQVSRdm44Px/ThwVBYsJZl8PnXoMxF8Cf74Eln3LGDETYgqJcKqoaKNt7LOL7FpHYom7tPiouyOQ3K6u4Y3Jiv/ZjraW5rZ3GlnYaW9pCN+d+Q6fHTa1tNDSHHreeeL2py/uOv6e1naYu+2hsaaOpm2ttDfCRKJyuM6JSR8Atf4TX7oe//xvsXeMsPzoqcitczT9nBD4Dz2+opGhUesT2KyKxR+HcRzPHZvHQy9v507YWdgW2nxyCrW2nBK0Tpk5gdoRnQ3Mbja1tfe5FDcb5SIjzkRDwkxDwkxjwkxDwER/wk54YIDEt3nktznk+IdhxP/Q49J5DOzczKqN/f2TEBJ/Pma1t7MWw5NPwyJUw/7twwRcj0jOSlRxk9rhhLCur5CsLJvV7fyISuxTOfVRckElS0M/SHS0s3bEJgDifOR6UHeGXEPCRGPCTHB9HVvKJxydv0+lxKDwTg879+EDX94QCOM6PP0JTbK6ofi8i+4kZYy6Az73qjOJ+4VvO4hkfetCZ8rWfFhbl8p1ny9h2oJYJw70zO5yIeIvCuY8ykoK8+Y15LH95JfNKLiUhzkecPzrOO0sYkrKc66Hf/hW88E148GK4/tdQcEm/dntl4Qi+82wZy8oqmTB8QoSKFZFYozTph7SEAOnxhpT4OAVzLDIGZt/pzCwWSILHPwgr/gPa2/q8y7z0RM4bncELmi1MRM5AiSLSk7zznLm5p34UVvzQWYby2N4+725hYS7ryqvZW3X61c9EZGhTOIuEIz4VPvIQfOiXzkjuBy9xFjbpgwWFIwB09Cwip6VwFumNaR+Hf3gFUvOchU2WfbPXK1yNz0nh7BEpWuNZRE5L
4SzSW9kT4TMvwczPwBs/h98sgCM7erWLBYW5vL3jCIdrmwaoSBGJZgpnkb4IJMA1/wkffQIOvw8PzYENT4f99gWFubRbeGnTgQEsUkSilcJZpD+mXOdcE50zyZn289m7w1rhqnBkGvmZieraFpFuKZxF+itzLNzxV7j4y7DmcfjV5XBg8xnfYoxhQWEuK987RE1jyyAVKiLRQuEsEgn+AFzxPbjlaag7CA+XwJonzrjC1cKiXJrb2lmx5eDg1SkiUSGscDbGLDTGbDHGbDPG3HuabT5qjNlojCkzxvwusmWKRIkJ8+Hzr8HoWc70n09/Ghq7X4Xq/DGZZKfEq2tbRE7RYzgbY/zAA8BVwBTgZmPMlC7bTAS+DlxsrS0EvjwAtYpEh9RcuPWPcPm/QNmfnMFiFWtO2czvM1wxZQQrNh+gsaXvs46JSOwJ58h5FrDNWrvdWtsMPAVc12WbzwIPWGuPAlhrNQRVhjafH+Z8BW5/DtpanBWu3vjFKd3cC4tyqWtu47Vth1wqVES8KJxwHgXs6fS4PPRcZ2cDZxtjXjPGvGmMWRipAkWi2tgLndHcE6+AZV+H338M6g4ff/nC8cNITYjj+Q3q2haRE4ztYTFhY8wNwEJr7WdCj28FZltr7+q0zV+AFuCjQD7wCjDVWlvVZV93AncC5OTkzFi0aFEEm+KO2tpaUlKie+m/WGgDeLwd1jKq4jnOev9RWgLpbJzyz1RnFALw0LpG1h9q47/nJtFQX+fdNvSCp38WvRAL7YiFNkBstGPu3LmrrbXF4WwbzpKRFcDoTo/zQ891Vg68Za1tAXYYY7YCE4FVnTey1j4MPAwwadIkW1JSEk6NnrZixQqivR2x0AaIhnbMhb23EL/kDqav+xZcdi/M+QqN2Qf43P+uIXHsVPx7Nni8DeHx/s8iPLHQjlhoA8ROO8IVTrf2KmCiMWacMSYIfAx4tss2fwJKAIwx2Tjd3NsjWKdIbBg5zZmbu+gGWPHv8MR1XJbXSnycj2Xq2haRkB7D2VrbCtwFLAM2AYustWXGmO8bY64NbbYMOGyM2QgsB75qrT3c/R5Fhrj4VPjIw3DdL6BiNYmPlPDF/O0sK9tPew+nmURkaAinWxtr7VJgaZfnvt3pvgX+KXQTkZ4YA9M/AfnFsPgO7q78BvGt17D76CfdrkxEPCCscBaRAZIzCT77Ek3PfZ1/WPsouzduhLS3IGM0pI8JfR0NwSS3KxWRQaRwFnFbIJH4D/2Un+0ZxYcO/xr7+s8w7a0nb5M0zAnprqHd8TUx0zkaF5GYoHAW8YismR9lzp+mcMlZGVw+qp3ZmXVMTKgiWFMO1Xugag8c3ALv/Q1aG05+czAlFNZdgzv0OHk4+DSVvki0UDiLeMQNM/JZuXYzO+va+MGrNVgLQX86540ew8yCLGbNymLG2ExS4+Og/jBU7T4R2se/7oY9b0Fj1ck79wchPf/0R99po5zFO0TEExTOIh6REPDzscnxlJTMobq+hdJdR3h7xxHe2nGEh1/Zzi9WvI/PQOHIdCesx41kZkEhw6bEn7qzpppOod0lxN/7G9R2uWzL+CA17+Sucp33FnGNwlnEg9KTAsw7ZwTzzhkBQH1zK+/sruKtHUd4e8dhnnxrF795bQcAE4enMHNcFrPHZTFrXBZ56YnO5Vojpji37rQ2QXX5qUfeVaEj77I/gs57i7hG4SwSBZKCcVw8IZuLJ2QD0NTaxoaK6lBYH+HZtXv53Vu7ARidlcjMgo6wHkbBsCRM19CMi4dhZzm37rS3Qc2+7o++ezzvPZqJdX7wr3G60tNGhb6OVNe5SJgUziJRKD7Oz4yxWcwYm8UXSqCt3bJp3zHeDoX1ii0HeWaNM8tuTmo8swqco+pZ47KYNCIVn6+HI1yfP3SOOh+48NTXrT3jee/hh3bC3r92eZNxltPsCOuOW9ooSB/lBHtStgauiaBwFokJfp+haFQ6RaPS+dQl47DW8v7BulBYH+btHUd4
bv0+ANIS4kLnrJ1b0ah0Av5eBqIxkJzt3Eadf8rLr61YQclFM6G6Ao6Vh7rQK5yvx8phfxlsXXbq0bc/eHJ4d3c/Ia2v3yaRqKFwFolBxhgmDE9hwvAUPj57DADlR+uPH1m/veMIL212ll1PDPg5f2wGswqGMWtcFtPHZJAQ8Pe/iGAy5Jzt3LpjLTQcdY64Owd3R5DveNXpWrdtJ78vPq1LcIeOujt3n8d1M0hOJIoonEWGiPzMJPIzk/jI+fkAHKhppHTn0eMjwn/60lashYDfcG5+xvEj6xljM0lLGIBzxcZAUpZzyzuv+23aWqF2/6nB3fF47xqne72r5OHdB3dHqKeMUPe5eJrCWWSIGp6awNVT87h6ah4A1Q0trN515Pggs1+9sp1fhi7fmjIy7fggs5kFWQxLGaQjU39cKGBHAbO736alAY7t7f4I/OBW2PZ3aKk7+T2+AKTldQnuzkE+ChIyBrx5IqejcBYRANITA1w+eQSXTz5x+dba45dvHeF3b+3m0dd2AjBheEqnEeFZjMxIdK/wQOKZR55b60zK0l3XeXU57HkTyvaeeulYMIVZ/lR4fwwk54TOsed0f0vM1JG4RJTCWUS6lRSM46IJ2VwUunyrubWd9RXVxweZ/WXdXn7/tnP5Vn5m4kkjwq2Xlr40xgnPxEzILep+m/Y2qD0AxypOOgKv2VFGUpyBI9ud67/rD4Nt7+YzfM5I81NCPHQ/ZfjJj4PJA9tmiXoKZxEJSzDOx4yxmcwYm8nnS86ird2yufLE5Vsvbz3IM+84l28l+GHM2pfJS09kZEYiI9MTGJmRSF5GAqMyEslNTyA+LgKDziLF53e6udPynGU8QzatWMGIkpIT27W3OYPY6g52uh06+X7tAahY7dxvrun+8wJJ3Yd4d7ekYU73vgwp+omLSJ/4fYbCkekUjkznjoudy7e2H6rjre1HWL5mE76UZPZWNbKhoprDdc2nvD87JZ5RGQknAjwjIfTVCfPslPier8cebD7/iUvIOKfn7VsaOoX3oW5CPXS0vm+d81zXrvUOiVmnCfLsTkflocfxaZqpLQYonEUkIowxnJWTwlk5KYxs2E5JyYkj0MaWNvZVN7KvqoGKqgb2VjWyr9q5v+1gLa+8d5D65pMvmQr4DbnpCYzsGt6hx3kZCQMzijySAonO9KYZo3vetuPceLchftA5Iq875FwjXnfw1MVNOviDJ4X45JpWqF8KCelnvsWn6QjdQ/STEJEBlxDwMy47mXHZ3Z9rtdZS3dDC3qpG9lY1hILbCfC9VQ28veMIlccaaWs/+Vx2anwceV2OuEdmJJKXnni8+zwYFyUDtTqfG8+e2PP2rc3OOfBuu9dP3DKqKqD6HWg8BvQwFiCY2iW003oO9YR0Z2S7wj2i9J0UEdcZY8hICpKRFGTKyO5nAGtrtxyoaTwe4E6IN1IRCvN3y6s50k33eU5qfJfgds5754WOxrOTPdh9Ho644Inz5Gfw5ooVlJSUQHu7cw68sTr827EKOLDpxOMewz0lvCP07sI9IU1zr3eicBaRqOD3GfLSnaPiGWMzu92mobktdLTdyN7QUfe+0P0t+2tYseUgDS0nd58H/T6n+zwj4aQu85HpieyqbmPPkXoyk4MkB/2nLiASTXy+E2HYF+3t0FwbZrBXhcJ974lwbzrW/Uj3zgLJpw31cfuPgq/UWXEtmOz8IRBMgfjQ12DyidfiEqL+vLvCWURiRmLQz/icFMbnpHT7urWWqvqWUHCHjsBD9/dVNfDm9sPsr2k6qfv8u28sB5xz4BlJQTKTAmQmBZ1bcuAMzwVJTwzgj8aj8u74fKFu7jQgjHPoXfUU7k3HTg72xmpn3fFDW6ChijGNx2B3D+Hewfi7Ce6UE4F+/HFqeK8Fkgf9OnaFs4gMGcYYMpODZCYHKRzZ/RFka1s7B2qa2FfdwMtvriF//CSO1jdzpL6ZqroWjtY3U1XfwvsHazm6q4Wq+mZa27vv7jXG
mdwlMylIRtKJr1lJTg2dn8tMCpIVes5Tl5lFSj/D/eXlyym55EJornO655vroKnWCfzm2tD9rq91eVy/++Rtuy68ciaB5FODPD6l56P4zq/1gsJZRKSTOL/v+ACzmh1xlMw8c5BYa6lpaj0e3B3hfaSumar6Zo7Wnwj0/cca2bzvGEfrW07pXu8sKeg/EeTJweNH5ycdpSefuJ+RFCAlPi66u917YgwEEpxb8rDI7LOt1Znatdtg7xr63TyuOwhHdoReCz3XU9d9mBTOIiL9YIwhLSFAWkKAMcOSwn5fY0sbVfUnAv3o8aPykwP9SF0ze47Uc7S+heqGltPuL+A3pCcGyerS1X7sUDMb2UZ6olNjWmKAtIQ40hIDx5+LmhHtkeaPA38/zsN3Za1zbXt3Qd5UA9+7PuxdKZxFRFyQEPCTm+4nNz0h7Pe0tTuXnDlh3jnEnftV9c0cCT2/41Ada+qrOFLbwtIdW3qoxXc8uNM7hbfzXFyXYHeeS0twtk1NiCOut+uBxypjIJjk3Bjer10pnEVEooTfZ8hKds5NkxPee5YvX86Fl8yhuqGFYw0tHGts4VhDa+irczR+rLH1pNcO1Taz/VBd6LnWU64v7yo56D8e3OmJJ8K781F6d8GelhggNT4uOi9lG2AKZxGRGGaMISHgJyHgZ0Ra+EfpHay11De3cawxFOQNnYO8hepOQd8R7vuqG9lcWcOxhhZqmlo50zooxkBKfNxpgv3Ec+XlLdS9u4+koJ+koJ/k+DgSg36Sgx1f/TF1BK9wFhGR0zLGkBwfR3J8HHnpvV8atL3dUtvceuIo/aQwb+32iH73kfrjr9c2nZhv/JENa874WcE4nxPcnQI7KRjnBHp8HMlB/ymBnhQf1+U9cSTF+0N/BDivBVwIfYWziIgMGJ/vxIC5/O7njjmj1rZ2ahpbeemVlZx7/kzqmlppaG6jrrmN+uZW6pvbun3u+NemNiqPNYZe73itrceu+s6Cfp8T2IHThfypgd71ueRg7+JW4SwiIp4V5/eRmRwkO9HH2SNSI7JPay1Nre2nBHZ9Uyjsuz7X4nyta2478Z6mNvbXNFLf1HbSe3oT+meicBYRkSGl83n4zORgxPZrraW5rd0J7E6BXh8K8yt+FP6+FM4iIiIRYIwhPs5PfJyfPvTgnyR2hraJiIjECIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY8JK5yNMQuNMVuMMduMMfeeYbvrjTHWGFMcuRJFRESGlh7D2RjjBx4ArgKmADcbY6Z0s10qcA/wVqSLFBERGUrCOXKeBWyz1m631jYDTwHXdbPdD4AfAY0RrE9ERGTICSecRwF7Oj0uDz13nDHmfGC0tfa5CNYmIiIyJBlr7Zk3MOYGYKG19jOhx7cCs621d4Ue+4C/A7dba3caY1YAX7HWlnazrzuBOwFycnJmLFq0KJJtcUVtbS0pKSlul9EvsdAGiI12xEIbQO3wklhoA8RGO+bOnbvaWhvWmKy4MLapAEZ3epwfeq5DKlAErDDGAOQCzxpjru0a0Nbah4GHASZNmmRLSkrCqdHTVqxYQbS3IxbaALHRjlhoA6gdXhILbYDYaUe4wunWXgVMNMaMM8YEgY8Bz3a8aK2tttZmW2sLrLUFwJvAKcEsIiIi4ekxnK21rcBdwDJgE7DIWltmjPm+MebagS5QRERkqAmnWxtr7VJgaZfnvn2abUv6X5aIiMjQpRnCREREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiI
xyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeE1Y4G2MWGmO2GGO2GWPu7eb1fzLGbDTGvGuMeckYMzbypYqIiAwNPYazMcYPPABcBUwBbjbGTOmy2TtAsbX2XGAJ8ONIFyoiIjJUhHPkPAvYZq3dbq1tBp4Cruu8gbV2ubW2PvTwTSA/smWKiIgMHcZae+YNjLkBWGit/Uzo8a3AbGvtXafZ/udApbX2X7t57U7gToCcnJwZixYt6mf57qutrSUlJcXtMvolFtoAsdGOWGgDqB1eEgttgNhox9y5c1dba4vD2TYukh9sjLkFKAYu6+51a+3DwMMAkyZNsiUlJZH8eFesWLGCaG9HLLQBYqMdsdAGUDu8JBbaALHTjnCFE84VwOhOj/NDz53EGDMf+CZwmbW2KTLliYiIDD3hnHNeBUw0xowzxgSBjwHPdt7AGDMdeAi41lp7IPJlioiIDB09hrO1thW4C1gGbAIWWWvLjDHfN8ZcG9rsPiAFWGyMWWuMefY0uxMREZEehHXO2Vq7FFja5blvd7o/P8J1iYiIDFmaIUxERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPEbhLCIi4jEKZxEREY9ROIuIiHiMwllERMRjFM4iIiIeo3AWERHxGIWziIiIxyicRUREPCascDbGLDTGbDHGbDPG3NvN6/HGmD+EXn/LGFMQ6UJFRESGih7D2RjjBx4ArgKmADcbY6Z02ezTwFFr7QTgfuBHkS5URERkqAjnyHkWsM1au91a2ww8BVzXZZvrgMdD95cA84wxJnJlioiIDB3hhPMoYE+nx+Wh57rdxlrbClQDwyJRoIiIyFATN5gfZoy5E7gz9LDJGLNhMD9/gGQDh9wuop9ioQ0QG+2IhTaA2uElsdAGiI12TAp3w3DCuQIY3elxfui57rYpN8bEAenA4a47stY+DDwMYIwptdYWh1uoV8VCO2KhDRAb7YiFNoDa4SWx0AaIjXYYY0rD3Tacbu1VwERjzDhjTBD4GPBsl22eBW4L3b8B+Lu11oZbhIiIiJzQ45GztbbVGHMXsAzwA7+x1pYZY74PlFprnwUeAX5rjNkGHMEJcBEREemDsM45W2uXAku7PPftTvcbgRt7+dkP93J7r4qFdsRCGyA22hELbQC1w0tioQ0QG+0Iuw1Gvc8iIiLeouk7RUREPMaVcO5pOtBoYIz5jTHmQDRfDmaMGW2MWW6M2WiMKTPG3ON2Tb1ljEkwxrxtjFkXasP33K6pP4wxfmPMO8aYv7hdS18ZY3YaY9YbY9b2ZnSqlxhjMowxS4wxm40xm4wxF7pdU28ZYyaFfgYdt2PGmC+7XVdvGWP+MfR/e4Mx5vfGmAS3a+oLY8w9oTaUhfNzGPRu7dB0oFuBK3AmNFkF3Gyt3TiohfSTMWYOUAs8Ya0tcruevjDG5AF51to1xphUYDXwoWj6WYRmoku21tYaYwLASuAea+2bLpfWJ8aYfwKKgTRr7QfcrqcvjDE7gWJrbdRek2qMeRx41Vr769BVKknW2iq36+qr0O/dCmC2tXaX2/WEyxgzCuf/9BRrbYMx
ZhGw1Fr7mLuV9Y4xpghnds1ZQDPwPPA5a+22073HjSPncKYD9Txr7Ss4I9OjlrV2n7V2Teh+DbCJU2d/8zTrqA09DIRuUTmQwhiTD1wD/NrtWoYyY0w6MAfnKhSstc3RHMwh84D3oymYO4kDEkNzaCQBe12upy/OAd6y1taHZtF8GfjImd7gRjiHMx2oDLLQSmLTgbfcraT3Ql3Ba4EDwIvW2qhrQ8hPgf8HtLtdSD9Z4AVjzOrQrIDRZhxwEHg0dIrh18aYZLeL6qePAb93u4jestZWAD8BdgP7gGpr7QvuVtUnG4BLjTHDjDFJwNWcPLnXKTQgTDDGpABPA1+21h5zu57esta2WWun4cxeNyvUhRRVjDEfAA5Ya1e7XUsEXGKtPR9nJbsvhk4BRZM44Hzgl9ba6UAdEJVjYwBC3fLXAovdrqW3jDGZOD2r44CRQLIx5hZ3q+o9a+0mnNUaX8Dp0l4LtJ3pPW6EczjTgcogCZ2nfRp40lr7jNv19Eeo63E5sNDtWvrgYuDa0Pnap4DLjTH/625JfRM62sFaewD4I86prGhSDpR36oFZghPW0eoqYI21dr/bhfTBfGCHtfagtbYFeAa4yOWa+sRa+4i1doa1dg5wFGfs1Wm5Ec7hTAcqgyA0mOoRYJO19r/crqcvjDE5xpiM0P1EnIGGm92tqvestV+31uZbawtw/k/83VobdUcIxpjk0OBCQl3BV+J06UUNa20lsMcY07FIwTwgagZJduNmorBLO2Q3cIExJin0+2oeztiYqGOMGR76OgbnfPPvzrT9oK5KBaefDnSw6+gvY8zvgRIg2xhTDnzHWvuIu1X12sXArcD60DlbgG+EZoSLFnnA46HRqD5gkbU2ai9DigEjgD+GlnOPA35nrX3e3ZL65EvAk6EDiO3AHS7X0yehP5CuAP7B7Vr6wlr7ljFmCbAGaAXeIXpnCnvaGDMMaAG+2NMgQ80QJiIi4jEaECYiIuIxCmcRERGPUTiLiIh4jMJZRETEYxTOIiIiHqNwFhER8RiFs4iIiMconEVERDzm/wNk/OJBfU117QAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 576x360 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "def plot_learning_curves(history):\n",
    "    \"\"\"Plot train/validation loss curves from a Keras History object.\"\"\"\n",
    "    pd.DataFrame(history.history).plot(figsize=(8, 5))\n",
    "    plt.grid(True)\n",
    "    plt.gca().set_ylim(0, 1)\n",
    "    plt.show()\n",
    "plot_learning_curves(history)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 91us/sample - loss: 3.0039 - val_loss: 2.2811\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 1.5413 - val_loss: 1.4473\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 1.0946 - val_loss: 1.1101\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.9134 - val_loss: 0.9604\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.8287 - val_loss: 0.8837\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.7808 - val_loss: 0.8373\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.7491 - val_loss: 0.8040\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.7265 - val_loss: 0.7803\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7079 - val_loss: 0.7587\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6931 - val_loss: 0.7422\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6799 - val_loss: 0.7269\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.6680 - val_loss: 0.7132\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6579 - val_loss: 0.7014\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6484 - val_loss: 0.6910\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.6400 - val_loss: 0.6813\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6324 - val_loss: 0.6724\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.6255 - val_loss: 0.6645\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.6189 - val_loss: 0.6571\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6128 - val_loss: 0.6504\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6072 - val_loss: 0.6440\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6018 - val_loss: 0.6379\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.5969 - val_loss: 0.6322\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.5922 - val_loss: 0.6269\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5878 - val_loss: 0.6220\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.5837 - val_loss: 0.6171\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.5798 - val_loss: 0.6129\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5761 - val_loss: 0.6090\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.5723 - val_loss: 0.6048\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.5689 - val_loss: 0.6012\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5655 - val_loss: 0.5974\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5624 - val_loss: 0.5941\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5594 - val_loss: 0.5911\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5565 - val_loss: 0.5878\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5537 - val_loss: 0.5845\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5512 - val_loss: 0.5815\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5486 - val_loss: 0.5791\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5462 - val_loss: 0.5764\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5438 - val_loss: 0.5738\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5414 - val_loss: 0.5714\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5393 - val_loss: 0.5688\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5369 - val_loss: 0.5660\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5349 - val_loss: 0.5640\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5329 - val_loss: 0.5617\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5309 - val_loss: 0.5595\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5288 - val_loss: 0.5569\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5270 - val_loss: 0.5548\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5251 - val_loss: 0.5528\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5232 - val_loss: 0.5505\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5214 - val_loss: 0.5484\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5197 - val_loss: 0.5464\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5179 - val_loss: 0.5446\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5163 - val_loss: 0.5427\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5147 - val_loss: 0.5411\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 0.4830\n",
      "7740/7740 [==============================] - 0s 21us/sample - loss: 0.5133\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 4.2068 - val_loss: 2.5104\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.8832 - val_loss: 1.3657\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.1896 - val_loss: 1.0111\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.9359 - val_loss: 0.8763\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.8307 - val_loss: 0.8143\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.7718 - val_loss: 0.7787\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.7348 - val_loss: 0.7573\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.7100 - val_loss: 0.7430\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6919 - val_loss: 0.7322\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6778 - val_loss: 0.7231\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6661 - val_loss: 0.7149\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6561 - val_loss: 0.7075\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6473 - val_loss: 0.7005\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6393 - val_loss: 0.6936\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6320 - val_loss: 0.6872\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6253 - val_loss: 0.6810\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6189 - val_loss: 0.6747\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6129 - val_loss: 0.6690\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6072 - val_loss: 0.6634\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6018 - val_loss: 0.6576\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5965 - val_loss: 0.6522\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5916 - val_loss: 0.6467\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5867 - val_loss: 0.6414\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5821 - val_loss: 0.6362\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5775 - val_loss: 0.6312\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5730 - val_loss: 0.6262\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5687 - val_loss: 0.6213\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5645 - val_loss: 0.6163\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5604 - val_loss: 0.6118\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5564 - val_loss: 0.6071\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5526 - val_loss: 0.6025\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5488 - val_loss: 0.5981\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5451 - val_loss: 0.5934\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5416 - val_loss: 0.5893\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5381 - val_loss: 0.5849\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5347 - val_loss: 0.5808\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5315 - val_loss: 0.5767\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.5283 - val_loss: 0.5726\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5252 - val_loss: 0.5687\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.5222 - val_loss: 0.5646\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.5193 - val_loss: 0.5609\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 0.5165 - val_loss: 0.5573\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5137 - val_loss: 0.5537\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5109 - val_loss: 0.5504\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5083 - val_loss: 0.5470\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5056 - val_loss: 0.5442\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5031 - val_loss: 0.5410\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5007 - val_loss: 0.5381\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.4984 - val_loss: 0.5354\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.4961 - val_loss: 0.5328\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4942 - val_loss: 0.5307\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4921 - val_loss: 0.5288\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.4903 - val_loss: 0.5267\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.4883 - val_loss: 0.5241\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.4866 - val_loss: 0.5220\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.4850 - val_loss: 0.5202\n",
      "Epoch 57/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4833 - val_loss: 0.5184\n",
      "Epoch 58/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.4817 - val_loss: 0.5167\n",
      "Epoch 59/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4801 - val_loss: 0.5149\n",
      "Epoch 60/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.4786 - val_loss: 0.5134\n",
      "Epoch 61/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4771 - val_loss: 0.5116\n",
      "3870/3870 [==============================] - 0s 19us/sample - loss: 0.4775\n",
      "7740/7740 [==============================] - 0s 21us/sample - loss: 0.4760\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 4.0175 - val_loss: 2.3775\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 1.4939 - val_loss: 1.1319\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.8747 - val_loss: 0.8417\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.7257 - val_loss: 0.7657\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6790 - val_loss: 0.7341\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6557 - val_loss: 0.7145\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.6394 - val_loss: 0.6993\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.6260 - val_loss: 0.6861\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 78us/sample - loss: 0.6144 - val_loss: 0.6746\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.6042 - val_loss: 0.6645\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5951 - val_loss: 0.6551\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.5868 - val_loss: 0.6463\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5791 - val_loss: 0.6382\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5720 - val_loss: 0.6306\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5653 - val_loss: 0.6234\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5591 - val_loss: 0.6167\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.5533 - val_loss: 0.6104\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5478 - val_loss: 0.6046\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5426 - val_loss: 0.5991\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.5377 - val_loss: 0.5936\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 1s 79us/sample - loss: 0.5333 - val_loss: 0.5886\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5290 - val_loss: 0.5840\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5251 - val_loss: 0.5798\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.5211 - val_loss: 0.5754\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.5175 - val_loss: 0.5713\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.5142 - val_loss: 0.5674\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.5109 - val_loss: 0.5637\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.5078 - val_loss: 0.5601\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 1s 78us/sample - loss: 0.5049 - val_loss: 0.5567\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5022 - val_loss: 0.5535\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.4995 - val_loss: 0.5505\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4970 - val_loss: 0.5477\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4946 - val_loss: 0.5450\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.4923 - val_loss: 0.5425\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4900 - val_loss: 0.5398\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4879 - val_loss: 0.5373\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4857 - val_loss: 0.5349\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4837 - val_loss: 0.5326\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4817 - val_loss: 0.5303\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4799 - val_loss: 0.5279\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.4781 - val_loss: 0.5258\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.4763 - val_loss: 0.5237\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.4746 - val_loss: 0.5217\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4730 - val_loss: 0.5198\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4714 - val_loss: 0.5177\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4699 - val_loss: 0.5156\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.4684 - val_loss: 0.5138\n",
      "3870/3870 [==============================] - 0s 34us/sample - loss: 0.5109\n",
      "7740/7740 [==============================] - 0s 31us/sample - loss: 0.4672\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 87us/sample - loss: 3.6827 - val_loss: 1.9206\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 1.3345 - val_loss: 1.0427\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.9024 - val_loss: 0.8638\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.7930 - val_loss: 0.8095\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.7520 - val_loss: 0.7801\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 0.7271 - val_loss: 0.7581\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.7078 - val_loss: 0.7397\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 79us/sample - loss: 0.6912 - val_loss: 0.7232\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 83us/sample - loss: 0.6762 - val_loss: 0.7077\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6628 - val_loss: 0.6947\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6507 - val_loss: 0.6824\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 46us/sample - loss: 0.6392 - val_loss: 0.6710\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 46us/sample - loss: 0.6290 - val_loss: 0.6601\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.6190 - val_loss: 0.6501\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6101 - val_loss: 0.6403\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6016 - val_loss: 0.6315\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5936 - val_loss: 0.6228\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5859 - val_loss: 0.6148\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5790 - val_loss: 0.6071\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5723 - val_loss: 0.6002\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5659 - val_loss: 0.5935\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5598 - val_loss: 0.5868\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5542 - val_loss: 0.5809\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5487 - val_loss: 0.5749\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5436 - val_loss: 0.5693\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5387 - val_loss: 0.5642\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5341 - val_loss: 0.5592\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5296 - val_loss: 0.5541\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5252 - val_loss: 0.5493\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5214 - val_loss: 0.5446\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5176 - val_loss: 0.5408\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5138 - val_loss: 0.5361\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5104 - val_loss: 0.5322\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5069 - val_loss: 0.5286\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5036 - val_loss: 0.5247\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5007 - val_loss: 0.5216\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4977 - val_loss: 0.5194\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4947 - val_loss: 0.5148\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4923 - val_loss: 0.5123\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4896 - val_loss: 0.5097\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4870 - val_loss: 0.5065\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4847 - val_loss: 0.5047\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4824 - val_loss: 0.5014\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4801 - val_loss: 0.4996\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4780 - val_loss: 0.4967\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4757 - val_loss: 0.4945\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4740 - val_loss: 0.4927\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4718 - val_loss: 0.4897\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4702 - val_loss: 0.4880\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4684 - val_loss: 0.4856\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4668 - val_loss: 0.4840\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.4652 - val_loss: 0.4821\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4635 - val_loss: 0.4797\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4620 - val_loss: 0.4795\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4605 - val_loss: 0.4767\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4591 - val_loss: 0.4754\n",
      "3870/3870 [==============================] - 0s 20us/sample - loss: 0.4329\n",
      "7740/7740 [==============================] - 0s 20us/sample - loss: 0.4578\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 3.3305 - val_loss: 1.9549\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 1.3334 - val_loss: 1.1097\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.8915 - val_loss: 0.8788\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.7553 - val_loss: 0.7910\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6998 - val_loss: 0.7468\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6688 - val_loss: 0.7190\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6474 - val_loss: 0.6987\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6303 - val_loss: 0.6822\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6164 - val_loss: 0.6684\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6039 - val_loss: 0.6558\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5929 - val_loss: 0.6450\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5828 - val_loss: 0.6345\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5735 - val_loss: 0.6248\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5650 - val_loss: 0.6156\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5572 - val_loss: 0.6078\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5500 - val_loss: 0.6002\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5433 - val_loss: 0.5929\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5370 - val_loss: 0.5855\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 0.5313 - val_loss: 0.5796\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5260 - val_loss: 0.5738\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5210 - val_loss: 0.5683\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5164 - val_loss: 0.5632\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5121 - val_loss: 0.5585\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5082 - val_loss: 0.5541\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5045 - val_loss: 0.5496\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5011 - val_loss: 0.5455\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 46us/sample - loss: 0.4977 - val_loss: 0.5419\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.4948 - val_loss: 0.5383\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.4920 - val_loss: 0.5350\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.4894 - val_loss: 0.5318\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4869 - val_loss: 0.5286\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.4845 - val_loss: 0.5258\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.4823 - val_loss: 0.5231\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.4800 - val_loss: 0.5207\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4780 - val_loss: 0.5178\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4760 - val_loss: 0.5154\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 1s 93us/sample - loss: 0.4742 - val_loss: 0.5134\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 1s 91us/sample - loss: 0.4725 - val_loss: 0.5115\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.4706 - val_loss: 0.5093\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 1s 98us/sample - loss: 0.4690 - val_loss: 0.5073\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.4675 - val_loss: 0.5053\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4659 - val_loss: 0.5031\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.4644 - val_loss: 0.5015\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.4630 - val_loss: 0.4998\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 0.4616 - val_loss: 0.4981\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.4602 - val_loss: 0.4962\n",
      "3870/3870 [==============================] - 0s 27us/sample - loss: 0.4684\n",
      "7740/7740 [==============================] - 0s 27us/sample - loss: 0.4591\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 3.7047 - val_loss: 1.8843\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 1.3238 - val_loss: 1.1391\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 0.9435 - val_loss: 0.9499\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.8262 - val_loss: 0.8703\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7689 - val_loss: 0.8256\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.7328 - val_loss: 0.7947\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.7064 - val_loss: 0.7713\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 83us/sample - loss: 0.6848 - val_loss: 0.7516\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 104us/sample - loss: 0.6660 - val_loss: 0.7335\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 104us/sample - loss: 0.6497 - val_loss: 0.7177\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 1s 138us/sample - loss: 0.6348 - val_loss: 0.7027\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 88us/sample - loss: 0.6211 - val_loss: 0.6899\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - ETA: 0s - loss: 0.612 - 1s 94us/sample - loss: 0.6088 - val_loss: 0.6769\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5974 - val_loss: 0.6651\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5868 - val_loss: 0.6540\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 0.5770 - val_loss: 0.6438\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5679 - val_loss: 0.6335\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5592 - val_loss: 0.6238\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5511 - val_loss: 0.6147\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5436 - val_loss: 0.6066\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5362 - val_loss: 0.5984\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5300 - val_loss: 0.5912\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5235 - val_loss: 0.5839\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5178 - val_loss: 0.5771\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5123 - val_loss: 0.5706\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5072 - val_loss: 0.5649\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5023 - val_loss: 0.5596\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4980 - val_loss: 0.5543\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4939 - val_loss: 0.5497\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4902 - val_loss: 0.5451\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 1s 110us/sample - loss: 0.4866 - val_loss: 0.5408\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.4832 - val_loss: 0.5365\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 103us/sample - loss: 0.4800 - val_loss: 0.5331\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 1s 95us/sample - loss: 0.4769 - val_loss: 0.5298\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 111us/sample - loss: 0.4741 - val_loss: 0.5261\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 1s 107us/sample - loss: 0.4713 - val_loss: 0.5234\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 1s 101us/sample - loss: 0.4687 - val_loss: 0.5199\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.4663 - val_loss: 0.5170\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.4638 - val_loss: 0.5139\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.4615 - val_loss: 0.5109\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.4593 - val_loss: 0.5086\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.4570 - val_loss: 0.5061\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4550 - val_loss: 0.5034\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 90us/sample - loss: 0.4530 - val_loss: 0.5012\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 1s 88us/sample - loss: 0.4509 - val_loss: 0.4987\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 1s 97us/sample - loss: 0.4490 - val_loss: 0.4967\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 1s 94us/sample - loss: 0.4472 - val_loss: 0.4943\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4454 - val_loss: 0.4922\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.4437 - val_loss: 0.4901\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.4421 - val_loss: 0.4885\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 1s 90us/sample - loss: 0.4403 - val_loss: 0.4866\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 0.4388 - val_loss: 0.4851\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4372 - val_loss: 0.4833\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.4356 - val_loss: 0.4813\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.4343 - val_loss: 0.4797\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.4328 - val_loss: 0.4779\n",
      "3870/3870 [==============================] - 0s 24us/sample - loss: 0.4774\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 0.4315\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 4.9486 - val_loss: 3.4650\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 99us/sample - loss: 2.5374 - val_loss: 2.0472\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 113us/sample - loss: 1.6284 - val_loss: 1.4477\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 1.2128 - val_loss: 1.1494\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 1s 94us/sample - loss: 0.9954 - val_loss: 0.9845\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.8727 - val_loss: 0.8872\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.7984 - val_loss: 0.8252\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.7509 - val_loss: 0.7837\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.7191 - val_loss: 0.7545\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.6964 - val_loss: 0.7325\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 0.6791 - val_loss: 0.7149\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.6651 - val_loss: 0.7004\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 0.6533 - val_loss: 0.6878\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.6429 - val_loss: 0.6763\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.6335 - val_loss: 0.6661\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.6248 - val_loss: 0.6565\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6168 - val_loss: 0.6481\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.6092 - val_loss: 0.6399\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6020 - val_loss: 0.6323\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.5953 - val_loss: 0.6251\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5890 - val_loss: 0.6183\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.5830 - val_loss: 0.6118\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5773 - val_loss: 0.6057\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5718 - val_loss: 0.5998\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5667 - val_loss: 0.5944\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 0.5617 - val_loss: 0.5892\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5570 - val_loss: 0.5840\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5526 - val_loss: 0.5790\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.5483 - val_loss: 0.5741\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 0.5442 - val_loss: 0.5697\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5403 - val_loss: 0.5656\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.5365 - val_loss: 0.5613\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.5329 - val_loss: 0.5574\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.5294 - val_loss: 0.5536\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.5261 - val_loss: 0.5498\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.5229 - val_loss: 0.5463\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5198 - val_loss: 0.5428\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.5169 - val_loss: 0.5396\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 1s 87us/sample - loss: 0.5140 - val_loss: 0.5365\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5113 - val_loss: 0.5335\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.5087 - val_loss: 0.5306\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5062 - val_loss: 0.5278\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5037 - val_loss: 0.5250\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.5014 - val_loss: 0.5225\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.4992 - val_loss: 0.5200\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4971 - val_loss: 0.5177\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4950 - val_loss: 0.5155\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4931 - val_loss: 0.5132\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.4912 - val_loss: 0.5113\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.4894 - val_loss: 0.5093\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4877 - val_loss: 0.5074\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.4860 - val_loss: 0.5055\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 1s 78us/sample - loss: 0.4844 - val_loss: 0.5037\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4829 - val_loss: 0.5019\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 0.4473\n",
      "7740/7740 [==============================] - 0s 20us/sample - loss: 0.4818\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 4.1616 - val_loss: 2.8693\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 2.0823 - val_loss: 1.6558\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 1.3308 - val_loss: 1.1835\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 1.0181 - val_loss: 0.9821\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.8704 - val_loss: 0.8831\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7927 - val_loss: 0.8278\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.7473 - val_loss: 0.7940\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.7184 - val_loss: 0.7718\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 46us/sample - loss: 0.6988 - val_loss: 0.7558\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.6844 - val_loss: 0.7433\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6731 - val_loss: 0.7329\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6636 - val_loss: 0.7236\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6554 - val_loss: 0.7153\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6479 - val_loss: 0.7075\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6411 - val_loss: 0.7002\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6346 - val_loss: 0.6933\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 46us/sample - loss: 0.6286 - val_loss: 0.6867\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6228 - val_loss: 0.6804\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6174 - val_loss: 0.6745\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6121 - val_loss: 0.6687\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6071 - val_loss: 0.6631\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6023 - val_loss: 0.6579\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5977 - val_loss: 0.6528\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.5933 - val_loss: 0.6479\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5890 - val_loss: 0.6431\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5850 - val_loss: 0.6385\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5810 - val_loss: 0.6341\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5772 - val_loss: 0.6298\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 0.5735 - val_loss: 0.6257\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 0.5700 - val_loss: 0.6218\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5666 - val_loss: 0.6180\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5633 - val_loss: 0.6141\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 118us/sample - loss: 0.5601 - val_loss: 0.6105\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 1s 93us/sample - loss: 0.5570 - val_loss: 0.6069\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5539 - val_loss: 0.6036\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5511 - val_loss: 0.6002\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5482 - val_loss: 0.5969\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5455 - val_loss: 0.5937\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 0.5429 - val_loss: 0.5906\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5403 - val_loss: 0.5876\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5378 - val_loss: 0.5847\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5353 - val_loss: 0.5819\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5329 - val_loss: 0.5792\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5306 - val_loss: 0.5766\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5284 - val_loss: 0.5740\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5263 - val_loss: 0.5715\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5241 - val_loss: 0.5690\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5221 - val_loss: 0.5666\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5201 - val_loss: 0.5643\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5181 - val_loss: 0.5620\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5163 - val_loss: 0.5598\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5144 - val_loss: 0.5577\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5126 - val_loss: 0.5556\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5108 - val_loss: 0.5535\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.5091 - val_loss: 0.5515\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.5075 - val_loss: 0.5496\n",
      "Epoch 57/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5058 - val_loss: 0.5477\n",
      "Epoch 58/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5043 - val_loss: 0.5458\n",
      "Epoch 59/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5027 - val_loss: 0.5439\n",
      "Epoch 60/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5012 - val_loss: 0.5422\n",
      "Epoch 61/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4998 - val_loss: 0.5404\n",
      "Epoch 62/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4983 - val_loss: 0.5387\n",
      "3870/3870 [==============================] - 0s 24us/sample - loss: 0.5072\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 0.4973\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 4.1320 - val_loss: 2.9431\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 2.1226 - val_loss: 1.7781\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 1.3750 - val_loss: 1.2848\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.0364 - val_loss: 1.0434\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.8707 - val_loss: 0.9155\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.7837 - val_loss: 0.8454\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.7362 - val_loss: 0.8038\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.7077 - val_loss: 0.7786\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.6895 - val_loss: 0.7616\n",
      "Epoch 10/100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6766 - val_loss: 0.7496\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6667 - val_loss: 0.7398\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6584 - val_loss: 0.7313\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6512 - val_loss: 0.7238\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6446 - val_loss: 0.7174\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6385 - val_loss: 0.7107\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6327 - val_loss: 0.7044\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.6272 - val_loss: 0.6984\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6219 - val_loss: 0.6929\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6168 - val_loss: 0.6873\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6119 - val_loss: 0.6816\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6072 - val_loss: 0.6764\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6026 - val_loss: 0.6715\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5982 - val_loss: 0.6665\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5939 - val_loss: 0.6619\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5898 - val_loss: 0.6574\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5857 - val_loss: 0.6527\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5818 - val_loss: 0.6485\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5780 - val_loss: 0.6439\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5744 - val_loss: 0.6397\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5708 - val_loss: 0.6355\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5674 - val_loss: 0.6316\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5640 - val_loss: 0.6276\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5607 - val_loss: 0.6239\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5575 - val_loss: 0.6203\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5545 - val_loss: 0.6168\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5515 - val_loss: 0.6134\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5486 - val_loss: 0.6100\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5458 - val_loss: 0.6068\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5429 - val_loss: 0.6037\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5403 - val_loss: 0.6005\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5377 - val_loss: 0.5973\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5351 - val_loss: 0.5944\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5327 - val_loss: 0.5914\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5303 - val_loss: 0.5885\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5279 - val_loss: 0.5858\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5257 - val_loss: 0.5832\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5234 - val_loss: 0.5804\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5213 - val_loss: 0.5778\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5192 - val_loss: 0.5754\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5171 - val_loss: 0.5731\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5151 - val_loss: 0.5706\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5132 - val_loss: 0.5683\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5113 - val_loss: 0.5660\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5094 - val_loss: 0.5637\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5077 - val_loss: 0.5616\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5059 - val_loss: 0.5596\n",
      "Epoch 57/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5042 - val_loss: 0.5574\n",
      "Epoch 58/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5025 - val_loss: 0.5553\n",
      "Epoch 59/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5009 - val_loss: 0.5533\n",
      "Epoch 60/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4993 - val_loss: 0.5514\n",
      "Epoch 61/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4977 - val_loss: 0.5496\n",
      "Epoch 62/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4962 - val_loss: 0.5477\n",
      "Epoch 63/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4947 - val_loss: 0.5459\n",
      "Epoch 64/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4933 - val_loss: 0.5441\n",
      "3870/3870 [==============================] - 0s 22us/sample - loss: 0.5362\n",
      "7740/7740 [==============================] - 0s 24us/sample - loss: 0.4922\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 103us/sample - loss: 4.9075 - val_loss: 4.5499\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 93us/sample - loss: 3.8956 - val_loss: 3.6598\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 3.1348 - val_loss: 2.9753\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 2.5582 - val_loss: 2.4663\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 2.1400 - val_loss: 2.0975\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 1.8451 - val_loss: 1.8404\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 1.6455 - val_loss: 1.6670\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 1.5169 - val_loss: 1.5560\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 1.4368 - val_loss: 1.4851\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 1.3868 - val_loss: 1.4395\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 1.3539 - val_loss: 1.4070\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 1.3295 - val_loss: 1.3817\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 1.3096 - val_loss: 1.3603\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 1.2917 - val_loss: 1.3406\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 1.2746 - val_loss: 1.3215\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 1.2574 - val_loss: 1.3025\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 1.2395 - val_loss: 1.2826\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 1.2205 - val_loss: 1.2615\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 1.2001 - val_loss: 1.2389\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 1.1781 - val_loss: 1.2145\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 1.1541 - val_loss: 1.1881\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 1.1278 - val_loss: 1.1590\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 1.0989 - val_loss: 1.1276\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 1.0674 - val_loss: 1.0934\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 1.0334 - val_loss: 1.0570\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.9977 - val_loss: 1.0193\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.9612 - val_loss: 0.9816\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.9254 - val_loss: 0.9460\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.8934 - val_loss: 0.9151\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.8657 - val_loss: 0.8886\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.8420 - val_loss: 0.8661\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.8220 - val_loss: 0.8471\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.8051 - val_loss: 0.8311\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7907 - val_loss: 0.8175\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7784 - val_loss: 0.8060\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.7679 - val_loss: 0.7960\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.7588 - val_loss: 0.7873\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7508 - val_loss: 0.7797\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7437 - val_loss: 0.7728\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7376 - val_loss: 0.7668\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.7321 - val_loss: 0.7613\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.7272 - val_loss: 0.7564\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7228 - val_loss: 0.7519\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.7188 - val_loss: 0.7477\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.7151 - val_loss: 0.7437\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.7118 - val_loss: 0.7402\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7086 - val_loss: 0.7369\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7056 - val_loss: 0.7336\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.7028 - val_loss: 0.7305\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7001 - val_loss: 0.7276\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6974 - val_loss: 0.7248\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6950 - val_loss: 0.7222\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6925 - val_loss: 0.7195\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6901 - val_loss: 0.7169\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6879 - val_loss: 0.7145\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 1s 78us/sample - loss: 0.6857 - val_loss: 0.7123\n",
      "Epoch 57/100\n",
      "7740/7740 [==============================] - 1s 92us/sample - loss: 0.6835 - val_loss: 0.7100\n",
      "Epoch 58/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6815 - val_loss: 0.7076\n",
      "Epoch 59/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6794 - val_loss: 0.7054\n",
      "Epoch 60/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6775 - val_loss: 0.7033\n",
      "Epoch 61/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6756 - val_loss: 0.7012\n",
      "Epoch 62/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6737 - val_loss: 0.6992\n",
      "Epoch 63/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6718 - val_loss: 0.6971\n",
      "Epoch 64/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6700 - val_loss: 0.6952\n",
      "Epoch 65/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6682 - val_loss: 0.6933\n",
      "3870/3870 [==============================] - 0s 24us/sample - loss: 0.6110\n",
      "7740/7740 [==============================] - 0s 29us/sample - loss: 0.6670\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 4.6785 - val_loss: 3.8557\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 2.9744 - val_loss: 2.4839\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 1.9282 - val_loss: 1.7206\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 1.4674 - val_loss: 1.4336\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 1.2748 - val_loss: 1.2750\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 1.1466 - val_loss: 1.1523\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 1.0371 - val_loss: 1.0446\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.9416 - val_loss: 0.9566\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.8655 - val_loss: 0.8872\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.8056 - val_loss: 0.8328\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.7588 - val_loss: 0.7902\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.7219 - val_loss: 0.7561\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6922 - val_loss: 0.7289\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6691 - val_loss: 0.7072\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6506 - val_loss: 0.6892\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6353 - val_loss: 0.6741\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6226 - val_loss: 0.6614\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6117 - val_loss: 0.6501\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6020 - val_loss: 0.6403\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5935 - val_loss: 0.6314\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.5858 - val_loss: 0.6231\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5790 - val_loss: 0.6155\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5727 - val_loss: 0.6086\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5670 - val_loss: 0.6021\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5617 - val_loss: 0.5962\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5567 - val_loss: 0.5904\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5521 - val_loss: 0.5852\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5477 - val_loss: 0.5805\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5437 - val_loss: 0.5754\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5399 - val_loss: 0.5709\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5363 - val_loss: 0.5668\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5330 - val_loss: 0.5628\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5296 - val_loss: 0.5593\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5266 - val_loss: 0.5553\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5235 - val_loss: 0.5519\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5205 - val_loss: 0.5487\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5177 - val_loss: 0.5453\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5150 - val_loss: 0.5422\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5124 - val_loss: 0.5392\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5098 - val_loss: 0.5363\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5075 - val_loss: 0.5338\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5052 - val_loss: 0.5313\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5030 - val_loss: 0.5287\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5009 - val_loss: 0.5264\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.4989 - val_loss: 0.5243\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4971 - val_loss: 0.5223\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4952 - val_loss: 0.5202\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4936 - val_loss: 0.5183\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4919 - val_loss: 0.5164\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4902 - val_loss: 0.5147\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4887 - val_loss: 0.5128\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4872 - val_loss: 0.5112\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4857 - val_loss: 0.5096\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 0.4916\n",
      "7740/7740 [==============================] - 0s 22us/sample - loss: 0.4848\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 89us/sample - loss: 4.8917 - val_loss: 4.0174\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 3.0257 - val_loss: 2.5556\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 108us/sample - loss: 1.9748 - val_loss: 1.7994\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 1.5106 - val_loss: 1.5045\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 1.3442 - val_loss: 1.3958\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 1.2821 - val_loss: 1.3472\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 1.2517 - val_loss: 1.3204\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 1.2327 - val_loss: 1.3036\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 1.2187 - val_loss: 1.2917\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 1.2076 - val_loss: 1.2821\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 1.1984 - val_loss: 1.2750\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 1.1902 - val_loss: 1.2686\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 1.1828 - val_loss: 1.2625\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 1.1757 - val_loss: 1.2566\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 1.1687 - val_loss: 1.2508\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 1.1619 - val_loss: 1.2443\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 1.1552 - val_loss: 1.2377\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 1.1484 - val_loss: 1.2305\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 1.1414 - val_loss: 1.2231\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 1.1343 - val_loss: 1.2159\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 1.1272 - val_loss: 1.2084\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 77us/sample - loss: 1.1201 - val_loss: 1.2006\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 1.1126 - val_loss: 1.1927\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 1.1049 - val_loss: 1.1843\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 1.0967 - val_loss: 1.1757\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 1.0881 - val_loss: 1.1658\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 1.0792 - val_loss: 1.1558\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 1.0697 - val_loss: 1.1453\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 1.0598 - val_loss: 1.1342\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 1.0493 - val_loss: 1.1226\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 1.0382 - val_loss: 1.1100\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 1.0263 - val_loss: 1.0966\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 1.0136 - val_loss: 1.0825\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 1.0004 - val_loss: 1.0678\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.9869 - val_loss: 1.0527\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.9731 - val_loss: 1.0373\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.9591 - val_loss: 1.0214\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.9452 - val_loss: 1.0062\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.9317 - val_loss: 0.9914\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.9185 - val_loss: 0.9770\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.9054 - val_loss: 0.9619\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.8923 - val_loss: 0.9473\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.8793 - val_loss: 0.9329\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.8667 - val_loss: 0.9187\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 1s 79us/sample - loss: 0.8541 - val_loss: 0.9044\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 1s 79us/sample - loss: 0.8414 - val_loss: 0.8903\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.8291 - val_loss: 0.8766\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.8170 - val_loss: 0.8634\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.8051 - val_loss: 0.8507\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 0.7937 - val_loss: 0.8385\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.7823 - val_loss: 0.8259\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.7711 - val_loss: 0.8138\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.7604 - val_loss: 0.8021\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.7499 - val_loss: 0.7907\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.7397 - val_loss: 0.7796\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.7301 - val_loss: 0.7694\n",
      "Epoch 57/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.7208 - val_loss: 0.7598\n",
      "Epoch 58/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.7119 - val_loss: 0.7504\n",
      "Epoch 59/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.7036 - val_loss: 0.7419\n",
      "Epoch 60/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6960 - val_loss: 0.7340\n",
      "Epoch 61/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6888 - val_loss: 0.7262\n",
      "Epoch 62/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.6819 - val_loss: 0.7191\n",
      "Epoch 63/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 0.6751 - val_loss: 0.7122\n",
      "Epoch 64/100\n",
      "7740/7740 [==============================] - 1s 79us/sample - loss: 0.6687 - val_loss: 0.7057\n",
      "Epoch 65/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6624 - val_loss: 0.6994\n",
      "Epoch 66/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6564 - val_loss: 0.6930\n",
      "Epoch 67/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 0.6506 - val_loss: 0.6873\n",
      "Epoch 68/100\n",
      "7740/7740 [==============================] - 1s 87us/sample - loss: 0.6449 - val_loss: 0.6817\n",
      "Epoch 69/100\n",
      "7740/7740 [==============================] - 1s 83us/sample - loss: 0.6394 - val_loss: 0.6761\n",
      "Epoch 70/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.6343 - val_loss: 0.6715\n",
      "Epoch 71/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.6292 - val_loss: 0.6665\n",
      "Epoch 72/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6243 - val_loss: 0.6617\n",
      "Epoch 73/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.6195 - val_loss: 0.6570\n",
      "Epoch 74/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6149 - val_loss: 0.6527\n",
      "Epoch 75/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.6104 - val_loss: 0.6486\n",
      "Epoch 76/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.6061 - val_loss: 0.6446\n",
      "Epoch 77/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6017 - val_loss: 0.6405\n",
      "Epoch 78/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5976 - val_loss: 0.6368\n",
      "Epoch 79/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5936 - val_loss: 0.6330\n",
      "Epoch 80/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5897 - val_loss: 0.6291\n",
      "Epoch 81/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.5859 - val_loss: 0.6257\n",
      "Epoch 82/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 0.5822 - val_loss: 0.6222\n",
      "Epoch 83/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5788 - val_loss: 0.6190\n",
      "Epoch 84/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5753 - val_loss: 0.6157\n",
      "Epoch 85/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5719 - val_loss: 0.6124\n",
      "Epoch 86/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5685 - val_loss: 0.6094\n",
      "Epoch 87/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5652 - val_loss: 0.6065\n",
      "Epoch 88/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5620 - val_loss: 0.6035\n",
      "Epoch 89/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5586 - val_loss: 0.6005\n",
      "Epoch 90/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5553 - val_loss: 0.5976\n",
      "Epoch 91/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5521 - val_loss: 0.5947\n",
      "Epoch 92/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5487 - val_loss: 0.5916\n",
      "Epoch 93/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5453 - val_loss: 0.5885\n",
      "Epoch 94/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5417 - val_loss: 0.5853\n",
      "Epoch 95/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5382 - val_loss: 0.5820\n",
      "Epoch 96/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5347 - val_loss: 0.5787\n",
      "Epoch 97/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5312 - val_loss: 0.5755\n",
      "Epoch 98/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5277 - val_loss: 0.5722\n",
      "Epoch 99/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5242 - val_loss: 0.5690\n",
      "Epoch 100/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5209 - val_loss: 0.5658\n",
      "3870/3870 [==============================] - 0s 21us/sample - loss: 0.5581\n",
      "7740/7740 [==============================] - 0s 19us/sample - loss: 0.5189\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 106us/sample - loss: 2.0194 - val_loss: 1.0317\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.8192 - val_loss: 0.7661\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.6890 - val_loss: 0.7062\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.6471 - val_loss: 0.6718\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.6215 - val_loss: 0.6456\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.5993 - val_loss: 0.6241\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5808 - val_loss: 0.6061\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.5631 - val_loss: 0.5861\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.5479 - val_loss: 0.5713\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5330 - val_loss: 0.5567\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.5198 - val_loss: 0.5427\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5077 - val_loss: 0.5306\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.4967 - val_loss: 0.5192\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.4864 - val_loss: 0.5094\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.4776 - val_loss: 0.4982\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.4694 - val_loss: 0.4893\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4617 - val_loss: 0.4818\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4545 - val_loss: 0.4730\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.4478 - val_loss: 0.4666\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.4420 - val_loss: 0.4589\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4362 - val_loss: 0.4529\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4301 - val_loss: 0.4466\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.4258 - val_loss: 0.4409\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4215 - val_loss: 0.4373\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4167 - val_loss: 0.4350\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.4126 - val_loss: 0.4262\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4093 - val_loss: 0.4224\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.4052 - val_loss: 0.4205\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.4017 - val_loss: 0.4165\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.3991 - val_loss: 0.4120\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.3957 - val_loss: 0.4090\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.3930 - val_loss: 0.4059\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.3900 - val_loss: 0.4032\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.3876 - val_loss: 0.4011\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.3852 - val_loss: 0.3975\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.3829 - val_loss: 0.3960\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 1s 71us/sample - loss: 0.3808 - val_loss: 0.3938\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.3786 - val_loss: 0.3921\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.3762 - val_loss: 0.3893\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.3745 - val_loss: 0.3873\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.3729 - val_loss: 0.3854\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.3705 - val_loss: 0.3840\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.3694 - val_loss: 0.3817\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 114us/sample - loss: 0.3675 - val_loss: 0.3809\n",
      "3870/3870 [==============================] - 0s 43us/sample - loss: 0.3550\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 0.3654\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 130us/sample - loss: 2.0760 - val_loss: 1.1864\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 102us/sample - loss: 0.9382 - val_loss: 0.8298\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 108us/sample - loss: 0.7323 - val_loss: 0.7293\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 148us/sample - loss: 0.6654 - val_loss: 0.6870\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 1s 105us/sample - loss: 0.6299 - val_loss: 0.6588\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 103us/sample - loss: 0.6036 - val_loss: 0.6339\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 1s 150us/sample - loss: 0.5823 - val_loss: 0.6149\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 115us/sample - loss: 0.5641 - val_loss: 0.5967\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 155us/sample - loss: 0.5481 - val_loss: 0.5811\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 136us/sample - loss: 0.5344 - val_loss: 0.5663\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 1s 106us/sample - loss: 0.5213 - val_loss: 0.5537\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.5102 - val_loss: 0.5408\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 1s 106us/sample - loss: 0.4993 - val_loss: 0.5308\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 1s 87us/sample - loss: 0.4898 - val_loss: 0.5191\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 92us/sample - loss: 0.4814 - val_loss: 0.5102\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 1s 83us/sample - loss: 0.4724 - val_loss: 0.5045\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.4646 - val_loss: 0.4906\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.4573 - val_loss: 0.4828\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 96us/sample - loss: 0.4508 - val_loss: 0.4749\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.4441 - val_loss: 0.4677\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.4391 - val_loss: 0.4613\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 92us/sample - loss: 0.4330 - val_loss: 0.4567\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.4279 - val_loss: 0.4506\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 1s 90us/sample - loss: 0.4233 - val_loss: 0.4601\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 92us/sample - loss: 0.4194 - val_loss: 0.4450\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 95us/sample - loss: 0.4132 - val_loss: 0.4367\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 1s 111us/sample - loss: 0.4096 - val_loss: 0.4326\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 1s 108us/sample - loss: 0.4081 - val_loss: 0.4293\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 1s 87us/sample - loss: 0.4042 - val_loss: 0.4305\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.4002 - val_loss: 0.4208\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 1s 78us/sample - loss: 0.3978 - val_loss: 0.4179\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 1s 95us/sample - loss: 0.3950 - val_loss: 0.4270\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 88us/sample - loss: 0.3923 - val_loss: 0.4149\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 1s 93us/sample - loss: 0.3892 - val_loss: 0.4123\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 98us/sample - loss: 0.3873 - val_loss: 0.4131\n",
      "3870/3870 [==============================] - 0s 37us/sample - loss: 0.3878\n",
      "7740/7740 [==============================] - 0s 45us/sample - loss: 0.3856\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 137us/sample - loss: 2.9677 - val_loss: 1.5791\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 1.1388 - val_loss: 0.9875\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 88us/sample - loss: 0.7713 - val_loss: 0.7545\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 95us/sample - loss: 0.6400 - val_loss: 0.6739\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 1s 86us/sample - loss: 0.5958 - val_loss: 0.6433\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.5723 - val_loss: 0.6219\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.5544 - val_loss: 0.6051\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 0.5398 - val_loss: 0.5904\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 0.5268 - val_loss: 0.5779\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 88us/sample - loss: 0.5151 - val_loss: 0.5658\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 1s 84us/sample - loss: 0.5050 - val_loss: 0.5562\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 90us/sample - loss: 0.4954 - val_loss: 0.5466\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 1s 105us/sample - loss: 0.4870 - val_loss: 0.5375\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 1s 97us/sample - loss: 0.4792 - val_loss: 0.5300\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 0.4719 - val_loss: 0.5216\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 1s 94us/sample - loss: 0.4653 - val_loss: 0.5149\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 117us/sample - loss: 0.4589 - val_loss: 0.5096\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 1s 155us/sample - loss: 0.4530 - val_loss: 0.5003\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 112us/sample - loss: 0.4479 - val_loss: 0.4945\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 1s 128us/sample - loss: 0.4426 - val_loss: 0.4876\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 1s 102us/sample - loss: 0.4371 - val_loss: 0.4817\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 134us/sample - loss: 0.4326 - val_loss: 0.4760\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 141us/sample - loss: 0.4282 - val_loss: 0.4723\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 1s 120us/sample - loss: 0.4239 - val_loss: 0.4672\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 102us/sample - loss: 0.4199 - val_loss: 0.4621\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 120us/sample - loss: 0.4157 - val_loss: 0.4565\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 1s 146us/sample - loss: 0.4121 - val_loss: 0.4533\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 1s 123us/sample - loss: 0.4086 - val_loss: 0.4476\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 1s 132us/sample - loss: 0.4053 - val_loss: 0.4449\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 149us/sample - loss: 0.4019 - val_loss: 0.4429\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 1s 113us/sample - loss: 0.3987 - val_loss: 0.4375\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 1s 138us/sample - loss: 0.3958 - val_loss: 0.4337\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 127us/sample - loss: 0.3930 - val_loss: 0.4339\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 1s 138us/sample - loss: 0.3897 - val_loss: 0.4300\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 124us/sample - loss: 0.3875 - val_loss: 0.4237\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 1s 166us/sample - loss: 0.3848 - val_loss: 0.4226\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 1s 128us/sample - loss: 0.3821 - val_loss: 0.4188\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 1s 108us/sample - loss: 0.3793 - val_loss: 0.4176\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 1s 145us/sample - loss: 0.3772 - val_loss: 0.4127\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 1s 143us/sample - loss: 0.3751 - val_loss: 0.4138\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 1s 144us/sample - loss: 0.3729 - val_loss: 0.4086\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 1s 129us/sample - loss: 0.3709 - val_loss: 0.4064\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 1s 146us/sample - loss: 0.3686 - val_loss: 0.4041\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 107us/sample - loss: 0.3668 - val_loss: 0.4026\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 1s 120us/sample - loss: 0.3648 - val_loss: 0.4003\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 1s 105us/sample - loss: 0.3624 - val_loss: 0.4010\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 1s 124us/sample - loss: 0.3607 - val_loss: 0.3990\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 1s 122us/sample - loss: 0.3590 - val_loss: 0.3962\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 1s 109us/sample - loss: 0.3574 - val_loss: 0.3960\n",
      "3870/3870 [==============================] - 0s 37us/sample - loss: 0.4058\n",
      "7740/7740 [==============================] - 0s 38us/sample - loss: 0.3563\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 138us/sample - loss: 3.5315 - val_loss: 2.3812\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 125us/sample - loss: 1.7857 - val_loss: 1.4210\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 1s 121us/sample - loss: 1.1610 - val_loss: 1.0287\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 1s 100us/sample - loss: 0.8916 - val_loss: 0.8549\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 1s 125us/sample - loss: 0.7709 - val_loss: 0.7752\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 1s 91us/sample - loss: 0.7128 - val_loss: 0.7339\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 1s 117us/sample - loss: 0.6809 - val_loss: 0.7089\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.6603 - val_loss: 0.6908\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 1s 111us/sample - loss: 0.6448 - val_loss: 0.6761\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 88us/sample - loss: 0.6323 - val_loss: 0.6630\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.6210 - val_loss: 0.6510\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.6108 - val_loss: 0.6402\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 0.6012 - val_loss: 0.6299\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 1s 85us/sample - loss: 0.5924 - val_loss: 0.6203\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5842 - val_loss: 0.6118\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.5766 - val_loss: 0.6033\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5695 - val_loss: 0.5956\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.5628 - val_loss: 0.5886\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 82us/sample - loss: 0.5564 - val_loss: 0.5823\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5508 - val_loss: 0.5760\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.5454 - val_loss: 0.5701\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5402 - val_loss: 0.5645\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.5355 - val_loss: 0.5593\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.5310 - val_loss: 0.5545\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 79us/sample - loss: 0.5267 - val_loss: 0.5499\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.5227 - val_loss: 0.5456\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.5189 - val_loss: 0.5411\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.5154 - val_loss: 0.5375\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.5120 - val_loss: 0.5339\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 0.5088 - val_loss: 0.5304\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 0.5059 - val_loss: 0.5270\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.5031 - val_loss: 0.5237\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.5004 - val_loss: 0.5206\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4979 - val_loss: 0.5181\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.4953 - val_loss: 0.5153\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 0.4930 - val_loss: 0.5126\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.4908 - val_loss: 0.5103\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4887 - val_loss: 0.5078\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4866 - val_loss: 0.5061\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4848 - val_loss: 0.5041\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4828 - val_loss: 0.5018\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.4810 - val_loss: 0.4999\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4793 - val_loss: 0.4979\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.4777 - val_loss: 0.4963\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.4760 - val_loss: 0.4946\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.4745 - val_loss: 0.4928\n",
      "3870/3870 [==============================] - 0s 26us/sample - loss: 0.4441\n",
      "7740/7740 [==============================] - 0s 30us/sample - loss: 0.4731\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 114us/sample - loss: 2.9217 - val_loss: 1.8761\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 1.3641 - val_loss: 1.1319\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.9540 - val_loss: 0.9213\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.8238 - val_loss: 0.8446\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7687 - val_loss: 0.8087\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.7385 - val_loss: 0.7860\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7172 - val_loss: 0.7679\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.7001 - val_loss: 0.7527\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.6853 - val_loss: 0.7390\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.6717 - val_loss: 0.7260\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6595 - val_loss: 0.7140\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6479 - val_loss: 0.7024\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.6372 - val_loss: 0.6917\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6270 - val_loss: 0.6810\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6174 - val_loss: 0.6713\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.6083 - val_loss: 0.6614\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5996 - val_loss: 0.6522\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5913 - val_loss: 0.6437\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5835 - val_loss: 0.6349\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5761 - val_loss: 0.6271\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5689 - val_loss: 0.6191\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5622 - val_loss: 0.6119\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5558 - val_loss: 0.6046\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 1s 65us/sample - loss: 0.5496 - val_loss: 0.5979\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5437 - val_loss: 0.5911\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5381 - val_loss: 0.5848\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5329 - val_loss: 0.5786\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5278 - val_loss: 0.5728\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5229 - val_loss: 0.5675\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5183 - val_loss: 0.5620\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5139 - val_loss: 0.5571\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5097 - val_loss: 0.5526\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5058 - val_loss: 0.5477\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5019 - val_loss: 0.5432\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4982 - val_loss: 0.5386\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4949 - val_loss: 0.5348\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4915 - val_loss: 0.5313\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4884 - val_loss: 0.5274\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4854 - val_loss: 0.5240\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4825 - val_loss: 0.5202\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4798 - val_loss: 0.5173\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4773 - val_loss: 0.5140\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4747 - val_loss: 0.5109\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4725 - val_loss: 0.5081\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4703 - val_loss: 0.5058\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4680 - val_loss: 0.5027\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4662 - val_loss: 0.5005\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.4641 - val_loss: 0.4980\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.4623 - val_loss: 0.4965\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.4605 - val_loss: 0.4944\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4586 - val_loss: 0.4916\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.4571 - val_loss: 0.4895\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.4557 - val_loss: 0.4882\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4541 - val_loss: 0.4861\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.4527 - val_loss: 0.4849\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.4513 - val_loss: 0.4836\n",
      "3870/3870 [==============================] - 0s 24us/sample - loss: 0.4547\n",
      "7740/7740 [==============================] - 0s 25us/sample - loss: 0.4501\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 75us/sample - loss: 2.6273 - val_loss: 1.7690\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 1.2528 - val_loss: 1.1447\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.9194 - val_loss: 0.9476\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.8088 - val_loss: 0.8677\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.7592 - val_loss: 0.8264\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.7306 - val_loss: 0.7998\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.7092 - val_loss: 0.7786\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6917 - val_loss: 0.7610\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6759 - val_loss: 0.7450\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6613 - val_loss: 0.7303\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6481 - val_loss: 0.7167\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6359 - val_loss: 0.7040\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.6246 - val_loss: 0.6918\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.6140 - val_loss: 0.6805\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6041 - val_loss: 0.6704\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5949 - val_loss: 0.6601\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5862 - val_loss: 0.6508\n",
      "Epoch 18/100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7740/7740 [==============================] - 1s 69us/sample - loss: 0.5781 - val_loss: 0.6420\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5704 - val_loss: 0.6336\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5634 - val_loss: 0.6256\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5565 - val_loss: 0.6181\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5501 - val_loss: 0.6112\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5441 - val_loss: 0.6045\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.5383 - val_loss: 0.5980\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5330 - val_loss: 0.5918\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5278 - val_loss: 0.5865\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5232 - val_loss: 0.5809\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5185 - val_loss: 0.5758\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5144 - val_loss: 0.5709\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5103 - val_loss: 0.5663\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5064 - val_loss: 0.5618\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5028 - val_loss: 0.5573\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4994 - val_loss: 0.5534\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4962 - val_loss: 0.5496\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4930 - val_loss: 0.5461\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4901 - val_loss: 0.5425\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4872 - val_loss: 0.5395\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4846 - val_loss: 0.5364\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4820 - val_loss: 0.5333\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4796 - val_loss: 0.5304\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4774 - val_loss: 0.5274\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4752 - val_loss: 0.5249\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4730 - val_loss: 0.5223\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4710 - val_loss: 0.5198\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4690 - val_loss: 0.5178\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4671 - val_loss: 0.5154\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4654 - val_loss: 0.5136\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4636 - val_loss: 0.5116\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4619 - val_loss: 0.5095\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4603 - val_loss: 0.5075\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4587 - val_loss: 0.5055\n",
      "3870/3870 [==============================] - 0s 28us/sample - loss: 0.4970\n",
      "7740/7740 [==============================] - 0s 28us/sample - loss: 0.4574\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 1.8874 - val_loss: 0.8838\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.7117 - val_loss: 0.7077\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6426 - val_loss: 0.6568\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5999 - val_loss: 0.6181\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5672 - val_loss: 0.5799\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5432 - val_loss: 0.5644\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5226 - val_loss: 0.5398\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5070 - val_loss: 0.5247\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4960 - val_loss: 0.5117\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4873 - val_loss: 0.5019\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4799 - val_loss: 0.4987\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4740 - val_loss: 0.4914\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4691 - val_loss: 0.4865\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4636 - val_loss: 0.4794\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4616 - val_loss: 0.4755\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4565 - val_loss: 0.4758\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4538 - val_loss: 0.4672\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4505 - val_loss: 0.4705\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4476 - val_loss: 0.4632\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4449 - val_loss: 0.4606\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4420 - val_loss: 0.4574\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4396 - val_loss: 0.4558\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.4378 - val_loss: 0.4536\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4361 - val_loss: 0.4516\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4336 - val_loss: 0.4480\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.4312 - val_loss: 0.4463\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 0.4296 - val_loss: 0.4440\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4274 - val_loss: 0.4431\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4255 - val_loss: 0.4404\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4243 - val_loss: 0.4385\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.4223 - val_loss: 0.4366\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.4203 - val_loss: 0.4348\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 1s 66us/sample - loss: 0.4195 - val_loss: 0.4331\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 1s 68us/sample - loss: 0.4173 - val_loss: 0.4320\n",
      "3870/3870 [==============================] - 0s 26us/sample - loss: 0.3951\n",
      "7740/7740 [==============================] - 0s 27us/sample - loss: 0.4160\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 81us/sample - loss: 2.1076 - val_loss: 1.0205\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.8448 - val_loss: 0.7968\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 61us/sample - loss: 0.7073 - val_loss: 0.7266\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.6511 - val_loss: 0.6824\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.6092 - val_loss: 0.6452\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5779 - val_loss: 0.6153\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.5510 - val_loss: 0.5866\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5321 - val_loss: 0.5713\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5168 - val_loss: 0.5508\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5038 - val_loss: 0.5364\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4939 - val_loss: 0.5260\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4854 - val_loss: 0.5168\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4788 - val_loss: 0.5080\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4718 - val_loss: 0.5017\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4671 - val_loss: 0.4955\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4613 - val_loss: 0.4899\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4571 - val_loss: 0.4881\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4526 - val_loss: 0.4793\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4492 - val_loss: 0.4766\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4454 - val_loss: 0.4726\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4428 - val_loss: 0.4665\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.4402 - val_loss: 0.4654\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4366 - val_loss: 0.4608\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4338 - val_loss: 0.4580\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4308 - val_loss: 0.4551\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4293 - val_loss: 0.4531\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4269 - val_loss: 0.4497\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4248 - val_loss: 0.4468\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4224 - val_loss: 0.4445\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4216 - val_loss: 0.4433\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4187 - val_loss: 0.4404\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4175 - val_loss: 0.4386\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4156 - val_loss: 0.4362\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4147 - val_loss: 0.4351\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 0.4120\n",
      "7740/7740 [==============================] - 0s 21us/sample - loss: 0.4125\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 2.5572 - val_loss: 1.0605\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.7518 - val_loss: 0.6972\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6016 - val_loss: 0.6458\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5697 - val_loss: 0.6152\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5481 - val_loss: 0.5931\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5280 - val_loss: 0.5754\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5124 - val_loss: 0.5593\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5045 - val_loss: 0.5476\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 0.4919 - val_loss: 0.5361\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.4836 - val_loss: 0.5282\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.4765 - val_loss: 0.5200\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4721 - val_loss: 0.5139\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4659 - val_loss: 0.5076\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4586 - val_loss: 0.5028\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4548 - val_loss: 0.4989\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4515 - val_loss: 0.4932\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4477 - val_loss: 0.4888\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4442 - val_loss: 0.4852\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4402 - val_loss: 0.4819\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4369 - val_loss: 0.4770\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4352 - val_loss: 0.4746\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4321 - val_loss: 0.4715\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4301 - val_loss: 0.4678\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4252 - val_loss: 0.4665\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4236 - val_loss: 0.4641\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4204 - val_loss: 0.4625\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4184 - val_loss: 0.4584\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4167 - val_loss: 0.4557\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 59us/sample - loss: 0.4144 - val_loss: 0.4542\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4122 - val_loss: 0.4518\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.4108 - val_loss: 0.4507\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4087 - val_loss: 0.4465\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4070 - val_loss: 0.4458\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.4057 - val_loss: 0.4440\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4053 - val_loss: 0.4422\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4026 - val_loss: 0.4409\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4008 - val_loss: 0.4393\n",
      "3870/3870 [==============================] - 0s 25us/sample - loss: 0.4447\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 0.3997\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 80us/sample - loss: 3.0148 - val_loss: 1.8854\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 1.5003 - val_loss: 1.4472\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 64us/sample - loss: 1.3403 - val_loss: 1.3930\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3233 - val_loss: 1.3852\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 1.3218 - val_loss: 1.3839\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 1.3215 - val_loss: 1.3834\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 1.3216 - val_loss: 1.3830\n",
      "Epoch 8/100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7740/7740 [==============================] - 0s 54us/sample - loss: 1.3215 - val_loss: 1.3829\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3215 - val_loss: 1.3828\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3215 - val_loss: 1.3827\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3215 - val_loss: 1.3826\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3215 - val_loss: 1.3826\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 1.3215 - val_loss: 1.3830\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 1.2981\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 1.3214\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 76us/sample - loss: 3.2002 - val_loss: 1.8771\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.4968 - val_loss: 1.4513\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3367 - val_loss: 1.3971\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 1.3173 - val_loss: 1.3877\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 1.3139 - val_loss: 1.3851\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3129 - val_loss: 1.3847\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3123 - val_loss: 1.3842\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3118 - val_loss: 1.3840\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3115 - val_loss: 1.3838\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3113 - val_loss: 1.3836\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 1.3248\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 1.3109\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 3.1833 - val_loss: 1.8682\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.4758 - val_loss: 1.4488\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.3272 - val_loss: 1.3965\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3112 - val_loss: 1.3870\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3093 - val_loss: 1.3847\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 1.3090 - val_loss: 1.3846\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3089 - val_loss: 1.3836\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3090 - val_loss: 1.3836\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3090 - val_loss: 1.3838\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 1.3089 - val_loss: 1.3838\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 1.3236\n",
      "7740/7740 [==============================] - 0s 22us/sample - loss: 1.3087\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 1.3409 - val_loss: 0.7885\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.8210 - val_loss: 0.7044\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5999 - val_loss: 0.5782\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5364 - val_loss: 0.5423\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5085 - val_loss: 0.5155\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.4887 - val_loss: 0.4959\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4759 - val_loss: 0.4830\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4683 - val_loss: 0.4839\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4609 - val_loss: 0.4670\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4577 - val_loss: 0.4975\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4610 - val_loss: 0.4593\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4476 - val_loss: 0.4509\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4475 - val_loss: 0.4502\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4406 - val_loss: 0.4436\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4372 - val_loss: 0.4425\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4474 - val_loss: 0.4492\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4357 - val_loss: 0.4415\n",
      "3870/3870 [==============================] - 0s 21us/sample - loss: 0.4038\n",
      "7740/7740 [==============================] - 0s 21us/sample - loss: 0.4334\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 1.6046 - val_loss: 0.8467\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.8820 - val_loss: 0.6656\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5518 - val_loss: 0.5685\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5076 - val_loss: 0.5355\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4849 - val_loss: 0.5266\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4679 - val_loss: 0.4881\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.4533 - val_loss: 0.4699\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4428 - val_loss: 0.4633\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4344 - val_loss: 0.4486\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4290 - val_loss: 0.4454\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4219 - val_loss: 0.4378\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.4180 - val_loss: 0.4359\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4150 - val_loss: 0.4279\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4119 - val_loss: 0.4251\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4100 - val_loss: 0.4234\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4082 - val_loss: 0.4230\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4064 - val_loss: 0.4175\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4040 - val_loss: 0.4189\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4032 - val_loss: 0.4112\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4014 - val_loss: 0.4104\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4003 - val_loss: 0.4088\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.3983 - val_loss: 0.4084\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.3980 - val_loss: 0.4072\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.3978 - val_loss: 0.4096\n",
      "3870/3870 [==============================] - 0s 22us/sample - loss: 0.3906\n",
      "7740/7740 [==============================] - 0s 21us/sample - loss: 0.3955\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 74us/sample - loss: 1.7829 - val_loss: 0.6810\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5679 - val_loss: 0.6030\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5146 - val_loss: 0.5424\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4834 - val_loss: 0.5178\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4627 - val_loss: 0.4981\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4460 - val_loss: 0.4802\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4340 - val_loss: 0.4684\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4242 - val_loss: 0.4550\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4158 - val_loss: 0.4459\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4094 - val_loss: 0.4382\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4041 - val_loss: 0.4325\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.3996 - val_loss: 0.4317\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.3968 - val_loss: 0.4252\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.3935 - val_loss: 0.4222\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.3907 - val_loss: 0.4177\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.3878 - val_loss: 0.4181\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.3863 - val_loss: 0.4155\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.3846 - val_loss: 0.4170\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 0.4217\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 0.3834\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 3.1910 - val_loss: 2.4369\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 1.8886 - val_loss: 1.5560\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 1.2893 - val_loss: 1.1466\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 1.0294 - val_loss: 0.9814\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.9186 - val_loss: 0.9065\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.8626 - val_loss: 0.8644\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.8254 - val_loss: 0.8352\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.7957 - val_loss: 0.8078\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.7697 - val_loss: 0.7830\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 63us/sample - loss: 0.7463 - val_loss: 0.7611\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.7248 - val_loss: 0.7419\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.7049 - val_loss: 0.7234\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6864 - val_loss: 0.7061\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6694 - val_loss: 0.6893\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6535 - val_loss: 0.6738\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6388 - val_loss: 0.6587\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6251 - val_loss: 0.6447\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 62us/sample - loss: 0.6122 - val_loss: 0.6321\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.6001 - val_loss: 0.6199\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 58us/sample - loss: 0.5888 - val_loss: 0.6085\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5783 - val_loss: 0.5982\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5687 - val_loss: 0.5889\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5596 - val_loss: 0.5799\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.5511 - val_loss: 0.5717\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 1s 67us/sample - loss: 0.5435 - val_loss: 0.5640\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 1s 70us/sample - loss: 0.5363 - val_loss: 0.5571\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 56us/sample - loss: 0.5298 - val_loss: 0.5504\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5237 - val_loss: 0.5443\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 60us/sample - loss: 0.5183 - val_loss: 0.5387\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 55us/sample - loss: 0.5131 - val_loss: 0.5333\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5082 - val_loss: 0.5283\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.5035 - val_loss: 0.5232\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4990 - val_loss: 0.5186\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.4949 - val_loss: 0.5144\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4911 - val_loss: 0.5109\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4876 - val_loss: 0.5071\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4846 - val_loss: 0.5043\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4817 - val_loss: 0.5017\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4790 - val_loss: 0.4982\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4767 - val_loss: 0.4956\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4743 - val_loss: 0.4938\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4722 - val_loss: 0.4917\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4702 - val_loss: 0.4892\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4684 - val_loss: 0.4876\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4666 - val_loss: 0.4862\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4650 - val_loss: 0.4843\n",
      "3870/3870 [==============================] - 0s 23us/sample - loss: 0.4446\n",
      "7740/7740 [==============================] - 0s 23us/sample - loss: 0.4637\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 3.6749 - val_loss: 2.5605\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 2.0147 - val_loss: 1.5705\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 1.3089 - val_loss: 1.1326\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.9922 - val_loss: 0.9477\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.8543 - val_loss: 0.8686\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.7904 - val_loss: 0.8303\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.7566 - val_loss: 0.8060\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.7338 - val_loss: 0.7883\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.7167 - val_loss: 0.7735\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.7024 - val_loss: 0.7606\n",
      "Epoch 11/100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6897 - val_loss: 0.7491\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6785 - val_loss: 0.7383\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6680 - val_loss: 0.7283\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6584 - val_loss: 0.7187\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6493 - val_loss: 0.7096\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.6409 - val_loss: 0.7004\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6329 - val_loss: 0.6918\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6253 - val_loss: 0.6835\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6182 - val_loss: 0.6755\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.6114 - val_loss: 0.6678\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 57us/sample - loss: 0.6050 - val_loss: 0.6607\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 1s 72us/sample - loss: 0.5990 - val_loss: 0.6538\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5932 - val_loss: 0.6473\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5878 - val_loss: 0.6410\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5827 - val_loss: 0.6350\n",
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5778 - val_loss: 0.6291\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5731 - val_loss: 0.6234\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5686 - val_loss: 0.6179\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5643 - val_loss: 0.6126\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5601 - val_loss: 0.6076\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5561 - val_loss: 0.6027\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5522 - val_loss: 0.5980\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5485 - val_loss: 0.5934\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5448 - val_loss: 0.5890\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5414 - val_loss: 0.5846\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5380 - val_loss: 0.5806\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5347 - val_loss: 0.5766\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5316 - val_loss: 0.5727\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5286 - val_loss: 0.5691\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5257 - val_loss: 0.5656\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5228 - val_loss: 0.5621\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5201 - val_loss: 0.5587\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5175 - val_loss: 0.5555\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5150 - val_loss: 0.5524\n",
      "Epoch 45/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5124 - val_loss: 0.5493\n",
      "Epoch 46/100\n",
      "7740/7740 [==============================] - 0s 54us/sample - loss: 0.5100 - val_loss: 0.5462\n",
      "Epoch 47/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5076 - val_loss: 0.5432\n",
      "Epoch 48/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5052 - val_loss: 0.5401\n",
      "Epoch 49/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5029 - val_loss: 0.5374\n",
      "Epoch 50/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5006 - val_loss: 0.5346\n",
      "Epoch 51/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.4984 - val_loss: 0.5320\n",
      "Epoch 52/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4963 - val_loss: 0.5296\n",
      "Epoch 53/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4943 - val_loss: 0.5272\n",
      "Epoch 54/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4924 - val_loss: 0.5249\n",
      "Epoch 55/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4905 - val_loss: 0.5228\n",
      "Epoch 56/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4887 - val_loss: 0.5207\n",
      "Epoch 57/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4869 - val_loss: 0.5186\n",
      "Epoch 58/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.4853 - val_loss: 0.5167\n",
      "Epoch 59/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.4836 - val_loss: 0.5148\n",
      "Epoch 60/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.4820 - val_loss: 0.5129\n",
      "3870/3870 [==============================] - 0s 22us/sample - loss: 0.4805\n",
      "7740/7740 [==============================] - 0s 22us/sample - loss: 0.4809\n",
      "Train on 7740 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "7740/7740 [==============================] - 1s 73us/sample - loss: 3.0366 - val_loss: 2.2312\n",
      "Epoch 2/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 1.6118 - val_loss: 1.4867\n",
      "Epoch 3/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 1.2410 - val_loss: 1.2292\n",
      "Epoch 4/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 1.0805 - val_loss: 1.1010\n",
      "Epoch 5/100\n",
      "7740/7740 [==============================] - 0s 52us/sample - loss: 0.9834 - val_loss: 1.0177\n",
      "Epoch 6/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.9142 - val_loss: 0.9571\n",
      "Epoch 7/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.8611 - val_loss: 0.9090\n",
      "Epoch 8/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.8190 - val_loss: 0.8690\n",
      "Epoch 9/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.7841 - val_loss: 0.8355\n",
      "Epoch 10/100\n",
      "7740/7740 [==============================] - 0s 53us/sample - loss: 0.7553 - val_loss: 0.8078\n",
      "Epoch 11/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.7307 - val_loss: 0.7850\n",
      "Epoch 12/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.7101 - val_loss: 0.7650\n",
      "Epoch 13/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6925 - val_loss: 0.7469\n",
      "Epoch 14/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6774 - val_loss: 0.7319\n",
      "Epoch 15/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6643 - val_loss: 0.7187\n",
      "Epoch 16/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.6528 - val_loss: 0.7083\n",
      "Epoch 17/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6427 - val_loss: 0.6990\n",
      "Epoch 18/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6336 - val_loss: 0.6897\n",
      "Epoch 19/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.6253 - val_loss: 0.6818\n",
      "Epoch 20/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.6179 - val_loss: 0.6749\n",
      "Epoch 21/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6110 - val_loss: 0.6686\n",
      "Epoch 22/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.6045 - val_loss: 0.6618\n",
      "Epoch 23/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5986 - val_loss: 0.6556\n",
      "Epoch 24/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5933 - val_loss: 0.6513\n",
      "Epoch 25/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5884 - val_loss: 0.6472\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 26/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5837 - val_loss: 0.6444\n",
      "Epoch 27/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5796 - val_loss: 0.6401\n",
      "Epoch 28/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5756 - val_loss: 0.6356\n",
      "Epoch 29/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5720 - val_loss: 0.6311\n",
      "Epoch 30/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5688 - val_loss: 0.6285\n",
      "Epoch 31/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5656 - val_loss: 0.6253\n",
      "Epoch 32/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5625 - val_loss: 0.6224\n",
      "Epoch 33/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5597 - val_loss: 0.6196\n",
      "Epoch 34/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5569 - val_loss: 0.6169\n",
      "Epoch 35/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5543 - val_loss: 0.6132\n",
      "Epoch 36/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5520 - val_loss: 0.6117\n",
      "Epoch 37/100\n",
      "7740/7740 [==============================] - 0s 51us/sample - loss: 0.5498 - val_loss: 0.6094\n",
      "Epoch 38/100\n",
      "7740/7740 [==============================] - 0s 50us/sample - loss: 0.5476 - val_loss: 0.6079\n",
      "Epoch 39/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5455 - val_loss: 0.6050\n",
      "Epoch 40/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5436 - val_loss: 0.6028\n",
      "Epoch 41/100\n",
      "7740/7740 [==============================] - 0s 47us/sample - loss: 0.5417 - val_loss: 0.6016\n",
      "Epoch 42/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5399 - val_loss: 0.6000\n",
      "Epoch 43/100\n",
      "7740/7740 [==============================] - 0s 48us/sample - loss: 0.5382 - val_loss: 0.5982\n",
      "Epoch 44/100\n",
      "7740/7740 [==============================] - 0s 49us/sample - loss: 0.5364 - val_loss: 0.5962\n",
      "3870/3870 [==============================] - 0s 22us/sample - loss: 0.5846\n",
      "7740/7740 [==============================] - 0s 21us/sample - loss: 0.5350\n",
      "Train on 11610 samples, validate on 3870 samples\n",
      "Epoch 1/100\n",
      "11610/11610 [==============================] - 1s 75us/sample - loss: 1.6897 - val_loss: 0.8235\n",
      "Epoch 2/100\n",
      "11610/11610 [==============================] - 1s 51us/sample - loss: 0.6972 - val_loss: 0.6696\n",
      "Epoch 3/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.6021 - val_loss: 0.6194\n",
      "Epoch 4/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.5598 - val_loss: 0.5840\n",
      "Epoch 5/100\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.5328 - val_loss: 0.5576\n",
      "Epoch 6/100\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.5113 - val_loss: 0.5347\n",
      "Epoch 7/100\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.4936 - val_loss: 0.5162\n",
      "Epoch 8/100\n",
      "11610/11610 [==============================] - 1s 51us/sample - loss: 0.4776 - val_loss: 0.4989\n",
      "Epoch 9/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4639 - val_loss: 0.4843\n",
      "Epoch 10/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4509 - val_loss: 0.4702\n",
      "Epoch 11/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4398 - val_loss: 0.4573\n",
      "Epoch 12/100\n",
      "11610/11610 [==============================] - 1s 57us/sample - loss: 0.4300 - val_loss: 0.4457\n",
      "Epoch 13/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4209 - val_loss: 0.4365\n",
      "Epoch 14/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4128 - val_loss: 0.4285\n",
      "Epoch 15/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4059 - val_loss: 0.4232\n",
      "Epoch 16/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.4002 - val_loss: 0.4163\n",
      "Epoch 17/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3951 - val_loss: 0.4096\n",
      "Epoch 18/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3909 - val_loss: 0.4047\n",
      "Epoch 19/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3868 - val_loss: 0.4000\n",
      "Epoch 20/100\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.3832 - val_loss: 0.3954\n",
      "Epoch 21/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3796 - val_loss: 0.3920\n",
      "Epoch 22/100\n",
      "11610/11610 [==============================] - 1s 49us/sample - loss: 0.3764 - val_loss: 0.3899\n",
      "Epoch 23/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3738 - val_loss: 0.3861\n",
      "Epoch 24/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3708 - val_loss: 0.3837\n",
      "Epoch 25/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3687 - val_loss: 0.3817\n",
      "Epoch 26/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3655 - val_loss: 0.3783\n",
      "Epoch 27/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3638 - val_loss: 0.3766\n",
      "Epoch 28/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3612 - val_loss: 0.3749\n",
      "Epoch 29/100\n",
      "11610/11610 [==============================] - 1s 55us/sample - loss: 0.3595 - val_loss: 0.3725\n",
      "Epoch 30/100\n",
      "11610/11610 [==============================] - 1s 50us/sample - loss: 0.3575 - val_loss: 0.3717\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "RandomizedSearchCV(cv=3, error_score='raise-deprecating',\n",
       "          estimator=<keras.wrappers.scikit_learn.KerasRegressor object at 0x13159f748>,\n",
       "          fit_params=None, iid='warn', n_iter=10, n_jobs=1,\n",
       "          param_distributions={'hidden_layers': [1, 2, 3, 4], 'layer_size': array([ 1,  2, ..., 98, 99]), 'learning_rate': <scipy.stats._distn_infrastructure.rv_frozen object at 0x1326c1e48>},\n",
       "          pre_dispatch='2*n_jobs', random_state=None, refit=True,\n",
       "          return_train_score='warn', scoring=None, verbose=0)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "## Define the parameter search space and run the randomized hyperparameter search\n",
    "\n",
    "## reciprocal is a continuous probability distribution from scipy (log-uniform),\n",
    "## used here to sample learning rates within a range — it is NOT an array generator\n",
    "from scipy.stats import reciprocal\n",
    "# f(x) = 1/(x*log(b/a)) a <= x <= b\n",
    "\n",
    "\n",
    "param_distribution = {\n",
    "    \"hidden_layers\":[1, 2, 3, 4],\n",
    "    \"layer_size\": np.arange(1, 100),\n",
    "    ## A frozen distribution object is accepted here, not just explicit sequences;\n",
    "    ## an explicit grid such as np.linspace(0.01, 2, 20) would also work\n",
    "    \"learning_rate\": reciprocal(1e-4, 1e-2),\n",
    "}\n",
    "\n",
    "from sklearn.model_selection import RandomizedSearchCV\n",
    "\n",
    "## RandomizedSearchCV arguments:\n",
    "#   sklearn_model: the estimator to tune (the KerasRegressor wrapper built in an earlier cell)\n",
    "#   param_distribution: dict mapping parameter names to lists or scipy distributions to sample from\n",
    "#   n_iter = 10: number of parameter settings sampled; larger values search more\n",
    "#                thoroughly but take proportionally longer\n",
    "#   cv = 3: number of cross-validation folds\n",
    "#   n_jobs = 1: number of CPU workers (default); -1 would use all available cores\n",
    "\n",
    "random_search_cv = RandomizedSearchCV(sklearn_model,\n",
    "                                      param_distribution,\n",
    "                                      n_iter = 10,\n",
    "                                      cv = 3,\n",
    "                                      ## number of CPU workers to use\n",
    "                                      n_jobs = 1)\n",
    "\n",
    "## fit runs the search: each sampled parameter combination is trained with\n",
    "## cross-validation, and the best-scoring combination is kept (the search\n",
    "## object itself then behaves like a fitted model)\n",
    "\n",
    "random_search_cv.fit(x_train_scaled, y_train, epochs = 100,\n",
    "                     validation_data = (x_valid_scaled, y_valid),\n",
    "                     callbacks = callbacks)\n",
    "\n",
    "# cross_validation: the training set is split into n folds; n-1 folds train, the remaining fold validates."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'hidden_layers': 4, 'layer_size': 44, 'learning_rate': 0.0009577195482517434}\n",
      "-0.38287415603026553\n",
      "<keras.wrappers.scikit_learn.KerasRegressor object at 0x138dbc080>\n"
     ]
    }
   ],
   "source": [
    "## Inspect the search results: best hyperparameters, best cross-validation\n",
    "## score (sklearn reports negated loss, hence the negative value), and the\n",
    "## winning estimator wrapper\n",
    "print(random_search_cv.best_params_)\n",
    "print(random_search_cv.best_score_)\n",
    "print(random_search_cv.best_estimator_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "5160/5160 [==============================] - 0s 30us/sample - loss: 0.3790\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.3790317230446394"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "## Retrieve the best underlying Keras model from the sklearn wrapper\n",
    "model = random_search_cv.best_estimator_.model\n",
    "## Evaluate it on the held-out test set (returns the test loss)\n",
    "model.evaluate(x_test_scaled, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
