{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "from keras.preprocessing.image import ImageDataGenerator\n",
    "from keras.callbacks import ModelCheckpoint,EarlyStopping,ReduceLROnPlateau,TensorBoard\n",
    "from keras.models import Model\n",
    "from keras import backend as K\n",
    "from keras.optimizers import Adam\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import os\n",
    "from losses import bce_dice_loss\n",
    "from rssegnet import RSSegVGGNet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def adjust_data(img,mask):\n",
    "    mean_ = np.array([96.24618792,104.67673492,99.35844062])\n",
    "    std_ = np.array([24.8796,25.4743,28.6289])\n",
    "    img = (img - mean_) / std_\n",
    "    mask = mask / 255\n",
    "    mask[mask > 0.5] = 1\n",
    "    mask[mask <= 0.5] = 0\n",
    "    return (img,mask)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size=8\n",
    "train_dataset_size=60000\n",
    "val_dataset_size=12000\n",
    "\n",
    "def make_train_generator():\n",
    "    train_data_root = 'D:/Data/AerialImageDataset/train'  # 训练集存放路径\n",
    "\n",
    "    # data augmentation\n",
    "    data_gen_args = dict(rotation_range=180,\n",
    "                        width_shift_range=0.2,\n",
    "                        height_shift_range=0.2,\n",
    "                        shear_range=0.2,\n",
    "                        zoom_range=0.2,\n",
    "                        horizontal_flip=True,\n",
    "                        vertical_flip=True,\n",
    "                        fill_mode='constant',\n",
    "                        cval=0\n",
    "                        )\n",
    "#     data_gen_args = dict()\n",
    "\n",
    "    train_image_datagen = ImageDataGenerator(**data_gen_args)\n",
    "    train_mask_datagen = ImageDataGenerator(**data_gen_args)\n",
    "\n",
    "    seed=1\n",
    "\n",
    "    train_image_generator = train_image_datagen.flow_from_directory(\n",
    "        train_data_root + '/images_resized_clipped',\n",
    "        class_mode=None,\n",
    "        batch_size=batch_size,\n",
    "        seed=seed,\n",
    "        color_mode='rgb',\n",
    "        save_to_dir=train_data_root + '/images_aug',\n",
    "        save_format='jpeg')\n",
    "\n",
    "    train_mask_generator = train_mask_datagen.flow_from_directory(\n",
    "        train_data_root + '/gt_resized_clipped',\n",
    "        class_mode=None,\n",
    "        batch_size=batch_size,\n",
    "        seed=seed,\n",
    "        color_mode='grayscale',\n",
    "        save_to_dir=train_data_root + '/masks_aug',\n",
    "        save_format='jpeg')\n",
    "\n",
    "    # combine generators into one which yields image and masks\n",
    "    train_generator = zip(train_image_generator, train_mask_generator)\n",
    "    \n",
    "    for (image, mask) in train_generator:\n",
    "#         print(image.shape)\n",
    "#         print(mask.shape)\n",
    "        yield adjust_data(image, mask)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_val_generator():\n",
    "    val_data_root = 'D:/Data/AerialImageDataset/val'  # 验证集存放路径\n",
    "    \n",
    "    # data augmentation\n",
    "    # data_gen_args = dict(rotation_range=0.2,\n",
    "    #                     width_shift_range=0.05,\n",
    "    #                     height_shift_range=0.05,\n",
    "    #                     shear_range=0.05,\n",
    "    #                     zoom_range=0.05,\n",
    "    #                     horizontal_flip=True,\n",
    "    #                     fill_mode='nearest')\n",
    "    data_gen_args = dict()\n",
    "\n",
    "    val_image_datagen = ImageDataGenerator(**data_gen_args)\n",
    "    val_mask_datagen = ImageDataGenerator(**data_gen_args)\n",
    "\n",
    "    seed = 1\n",
    "\n",
    "    val_image_generator = val_image_datagen.flow_from_directory(\n",
    "        val_data_root + '/images_resized_clipped',\n",
    "        class_mode=None,\n",
    "        batch_size=batch_size,\n",
    "        seed=seed,\n",
    "        color_mode='rgb')\n",
    "\n",
    "    val_mask_generator = val_mask_datagen.flow_from_directory(\n",
    "        val_data_root + '/gt_resized_clipped',\n",
    "        class_mode=None,\n",
    "        batch_size=batch_size,\n",
    "        seed=seed,\n",
    "        color_mode='grayscale')\n",
    "\n",
    "    val_generator = zip(val_image_generator, val_mask_generator)\n",
    "    \n",
    "    for (image, mask) in val_generator:\n",
    "#         print(image.shape)\n",
    "#         print(mask.shape)\n",
    "        yield adjust_data(image, mask)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "image_input (InputLayer)        (None, 256, 256, 3)  0                                            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_1 (Conv2D)               (None, 256, 256, 64) 1792        image_input[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_1 (BatchNor (None, 256, 256, 64) 256         conv2d_1[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_2 (Conv2D)               (None, 256, 256, 64) 36928       batch_normalization_1[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_2 (BatchNor (None, 256, 256, 64) 256         conv2d_2[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_3 (Conv2D)               (None, 256, 256, 64) 36928       batch_normalization_2[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "max_pooling2d_1 (MaxPooling2D)  (None, 128, 128, 64) 0           conv2d_3[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_3 (BatchNor (None, 128, 128, 64) 256         max_pooling2d_1[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_4 (Conv2D)               (None, 128, 128, 128 73856       batch_normalization_3[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_4 (BatchNor (None, 128, 128, 128 512         conv2d_4[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_5 (Conv2D)               (None, 128, 128, 128 147584      batch_normalization_4[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_5 (BatchNor (None, 128, 128, 128 512         conv2d_5[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_6 (Conv2D)               (None, 128, 128, 128 147584      batch_normalization_5[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "max_pooling2d_2 (MaxPooling2D)  (None, 64, 64, 128)  0           conv2d_6[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_6 (BatchNor (None, 64, 64, 128)  512         max_pooling2d_2[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_7 (Conv2D)               (None, 64, 64, 256)  295168      batch_normalization_6[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_7 (BatchNor (None, 64, 64, 256)  1024        conv2d_7[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_8 (Conv2D)               (None, 64, 64, 256)  590080      batch_normalization_7[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_8 (BatchNor (None, 64, 64, 256)  1024        conv2d_8[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_9 (Conv2D)               (None, 64, 64, 256)  590080      batch_normalization_8[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "max_pooling2d_3 (MaxPooling2D)  (None, 32, 32, 256)  0           conv2d_9[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_9 (BatchNor (None, 32, 32, 256)  1024        max_pooling2d_3[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_10 (Conv2D)              (None, 32, 32, 512)  1180160     batch_normalization_9[0][0]      \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_10 (BatchNo (None, 32, 32, 512)  2048        conv2d_10[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_11 (Conv2D)              (None, 32, 32, 512)  2359808     batch_normalization_10[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_11 (BatchNo (None, 32, 32, 512)  2048        conv2d_11[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_12 (Conv2D)              (None, 32, 32, 512)  2359808     batch_normalization_11[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "max_pooling2d_4 (MaxPooling2D)  (None, 16, 16, 512)  0           conv2d_12[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_12 (BatchNo (None, 16, 16, 512)  2048        max_pooling2d_4[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_13 (Conv2D)              (None, 16, 16, 512)  2359808     batch_normalization_12[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_13 (BatchNo (None, 16, 16, 512)  2048        conv2d_13[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_14 (Conv2D)              (None, 16, 16, 512)  2359808     batch_normalization_13[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_14 (BatchNo (None, 16, 16, 512)  2048        conv2d_14[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_15 (Conv2D)              (None, 16, 16, 512)  2359808     batch_normalization_14[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "max_pooling2d_5 (MaxPooling2D)  (None, 8, 8, 512)    0           conv2d_15[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_15 (BatchNo (None, 8, 8, 512)    2048        max_pooling2d_5[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_16 (Conv2D)              (None, 8, 8, 512)    2359808     batch_normalization_15[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_16 (BatchNo (None, 8, 8, 512)    2048        conv2d_16[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_17 (Conv2D)              (None, 8, 8, 512)    2359808     batch_normalization_16[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_17 (BatchNo (None, 8, 8, 512)    2048        conv2d_17[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_1 (Conv2DTrans (None, 16, 16, 256)  1179904     batch_normalization_17[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_1 (Concatenate)     (None, 16, 16, 768)  0           conv2d_transpose_1[0][0]         \n",
      "                                                                 conv2d_15[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_18 (BatchNo (None, 16, 16, 768)  3072        concatenate_1[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_18 (Conv2D)              (None, 16, 16, 512)  3539456     batch_normalization_18[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_19 (BatchNo (None, 16, 16, 512)  2048        conv2d_18[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_19 (Conv2D)              (None, 16, 16, 512)  2359808     batch_normalization_19[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_20 (BatchNo (None, 16, 16, 512)  2048        conv2d_19[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_2 (Conv2DTrans (None, 32, 32, 256)  1179904     batch_normalization_20[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_2 (Concatenate)     (None, 32, 32, 768)  0           conv2d_transpose_2[0][0]         \n",
      "                                                                 conv2d_12[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_21 (BatchNo (None, 32, 32, 768)  3072        concatenate_2[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_20 (Conv2D)              (None, 32, 32, 512)  3539456     batch_normalization_21[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_22 (BatchNo (None, 32, 32, 512)  2048        conv2d_20[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_21 (Conv2D)              (None, 32, 32, 512)  2359808     batch_normalization_22[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_23 (BatchNo (None, 32, 32, 512)  2048        conv2d_21[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_3 (Conv2DTrans (None, 64, 64, 128)  589952      batch_normalization_23[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_3 (Concatenate)     (None, 64, 64, 384)  0           conv2d_transpose_3[0][0]         \n",
      "                                                                 conv2d_9[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_24 (BatchNo (None, 64, 64, 384)  1536        concatenate_3[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_22 (Conv2D)              (None, 64, 64, 256)  884992      batch_normalization_24[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_25 (BatchNo (None, 64, 64, 256)  1024        conv2d_22[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_23 (Conv2D)              (None, 64, 64, 256)  590080      batch_normalization_25[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_26 (BatchNo (None, 64, 64, 256)  1024        conv2d_23[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_4 (Conv2DTrans (None, 128, 128, 64) 147520      batch_normalization_26[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_4 (Concatenate)     (None, 128, 128, 192 0           conv2d_transpose_4[0][0]         \n",
      "                                                                 conv2d_6[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_27 (BatchNo (None, 128, 128, 192 768         concatenate_4[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_24 (Conv2D)              (None, 128, 128, 128 221312      batch_normalization_27[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_28 (BatchNo (None, 128, 128, 128 512         conv2d_24[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_25 (Conv2D)              (None, 128, 128, 128 147584      batch_normalization_28[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_29 (BatchNo (None, 128, 128, 128 512         conv2d_25[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_5 (Conv2DTrans (None, 256, 256, 64) 73792       batch_normalization_29[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "concatenate_5 (Concatenate)     (None, 256, 256, 128 0           conv2d_transpose_5[0][0]         \n",
      "                                                                 conv2d_3[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_30 (BatchNo (None, 256, 256, 128 512         concatenate_5[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_26 (Conv2D)              (None, 256, 256, 64) 73792       batch_normalization_30[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_31 (BatchNo (None, 256, 256, 64) 256         conv2d_26[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_27 (Conv2D)              (None, 256, 256, 64) 36928       batch_normalization_31[0][0]     \n",
      "__________________________________________________________________________________________________\n",
      "batch_normalization_32 (BatchNo (None, 256, 256, 64) 256         conv2d_27[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_28 (Conv2D)              (None, 256, 256, 1)  577         batch_normalization_32[0][0]     \n",
      "==================================================================================================\n",
      "Total params: 36,586,177\n",
      "Trainable params: 36,564,929\n",
      "Non-trainable params: 21,248\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model = RSSegVGGNet.build()\n",
    "model.compile(loss=bce_dice_loss, optimizer=Adam(), metrics=['accuracy'])\n",
    "model.summary()\n",
    "# model.load_weights(\"building_seg_vgg16_bcedice_0918.h5\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/1000\n",
      "Found 60000 images belonging to 1 classes.\n",
      "Found 60000 images belonging to 1 classes.\n",
      "7499/7500 [============================>.] - ETA: 0s - loss: 0.7470 - acc: 0.8962Found 12000 images belonging to 1 classes.\n",
      "Found 12000 images belonging to 1 classes.\n",
      "7500/7500 [==============================] - 5986s 798ms/step - loss: 0.7470 - acc: 0.8962 - val_loss: 0.9585 - val_acc: 0.8427\n",
      "Epoch 2/1000\n",
      "7500/7500 [==============================] - 4304s 574ms/step - loss: 0.5839 - acc: 0.9223 - val_loss: 0.6367 - val_acc: 0.9161\n",
      "Epoch 3/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.4850 - acc: 0.9362 - val_loss: 0.5214 - val_acc: 0.9336\n",
      "Epoch 4/1000\n",
      "7500/7500 [==============================] - 4414s 589ms/step - loss: 0.4415 - acc: 0.9424 - val_loss: 0.6441 - val_acc: 0.9028\n",
      "Epoch 5/1000\n",
      "7500/7500 [==============================] - 4293s 572ms/step - loss: 0.4153 - acc: 0.9458 - val_loss: 0.6639 - val_acc: 0.8996\n",
      "Epoch 6/1000\n",
      "7500/7500 [==============================] - 4297s 573ms/step - loss: 0.3953 - acc: 0.9486 - val_loss: 0.4463 - val_acc: 0.9442\n",
      "Epoch 7/1000\n",
      "7500/7500 [==============================] - 4295s 573ms/step - loss: 0.3770 - acc: 0.9510 - val_loss: 0.3866 - val_acc: 0.9506\n",
      "Epoch 8/1000\n",
      "7500/7500 [==============================] - 4299s 573ms/step - loss: 0.3677 - acc: 0.9523 - val_loss: 0.4236 - val_acc: 0.9442\n",
      "Epoch 9/1000\n",
      "7500/7500 [==============================] - 4304s 574ms/step - loss: 0.3553 - acc: 0.9540 - val_loss: 0.3875 - val_acc: 0.9489\n",
      "Epoch 10/1000\n",
      "7500/7500 [==============================] - 4302s 574ms/step - loss: 0.3470 - acc: 0.9552 - val_loss: 0.3865 - val_acc: 0.9498\n",
      "Epoch 11/1000\n",
      "7500/7500 [==============================] - 4292s 572ms/step - loss: 0.3402 - acc: 0.9561 - val_loss: 0.3744 - val_acc: 0.9523\n",
      "Epoch 12/1000\n",
      "7500/7500 [==============================] - 4290s 572ms/step - loss: 0.3331 - acc: 0.9570 - val_loss: 0.3842 - val_acc: 0.9507\n",
      "Epoch 13/1000\n",
      "7500/7500 [==============================] - 4291s 572ms/step - loss: 0.3268 - acc: 0.9579 - val_loss: 0.3858 - val_acc: 0.9487\n",
      "Epoch 14/1000\n",
      "7500/7500 [==============================] - 4292s 572ms/step - loss: 0.3217 - acc: 0.9585 - val_loss: 0.3795 - val_acc: 0.9520\n",
      "Epoch 15/1000\n",
      "7500/7500 [==============================] - 4290s 572ms/step - loss: 0.3177 - acc: 0.9591 - val_loss: 0.3347 - val_acc: 0.9579\n",
      "Epoch 16/1000\n",
      "7500/7500 [==============================] - 4292s 572ms/step - loss: 0.3122 - acc: 0.9597 - val_loss: 0.3606 - val_acc: 0.9526\n",
      "Epoch 17/1000\n",
      "7500/7500 [==============================] - 4297s 573ms/step - loss: 0.3095 - acc: 0.9602 - val_loss: 0.3451 - val_acc: 0.9546\n",
      "Epoch 18/1000\n",
      "7500/7500 [==============================] - 4298s 573ms/step - loss: 0.3068 - acc: 0.9605 - val_loss: 0.3451 - val_acc: 0.9564\n",
      "Epoch 19/1000\n",
      "7500/7500 [==============================] - 4623s 616ms/step - loss: 0.3021 - acc: 0.9611 - val_loss: 0.3476 - val_acc: 0.9554\n",
      "Epoch 20/1000\n",
      "7500/7500 [==============================] - 4497s 600ms/step - loss: 0.3022 - acc: 0.9611 - val_loss: 0.3391 - val_acc: 0.9572\n",
      "Epoch 21/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2985 - acc: 0.9617 - val_loss: 0.3675 - val_acc: 0.9522\n",
      "Epoch 22/1000\n",
      "7500/7500 [==============================] - 4295s 573ms/step - loss: 0.2976 - acc: 0.9618 - val_loss: 0.3551 - val_acc: 0.9535\n",
      "Epoch 23/1000\n",
      "7500/7500 [==============================] - 4464s 595ms/step - loss: 0.2932 - acc: 0.9624 - val_loss: 0.3263 - val_acc: 0.9580\n",
      "Epoch 24/1000\n",
      "7500/7500 [==============================] - 4477s 597ms/step - loss: 0.2935 - acc: 0.9623 - val_loss: 0.3511 - val_acc: 0.9539\n",
      "Epoch 25/1000\n",
      "7500/7500 [==============================] - 4345s 579ms/step - loss: 0.2891 - acc: 0.9629 - val_loss: 0.3234 - val_acc: 0.9600\n",
      "Epoch 26/1000\n",
      "7500/7500 [==============================] - 4302s 574ms/step - loss: 0.2867 - acc: 0.9633 - val_loss: 0.3313 - val_acc: 0.9585\n",
      "Epoch 27/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2847 - acc: 0.9635 - val_loss: 0.3214 - val_acc: 0.9594\n",
      "Epoch 28/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2801 - acc: 0.9640 - val_loss: 0.3399 - val_acc: 0.9586\n",
      "Epoch 29/1000\n",
      "7500/7500 [==============================] - 4303s 574ms/step - loss: 0.2808 - acc: 0.9641 - val_loss: 0.3379 - val_acc: 0.9565\n",
      "Epoch 30/1000\n",
      "7500/7500 [==============================] - 4300s 573ms/step - loss: 0.2787 - acc: 0.9642 - val_loss: 0.3158 - val_acc: 0.9607\n",
      "Epoch 31/1000\n",
      "7500/7500 [==============================] - 4296s 573ms/step - loss: 0.2763 - acc: 0.9645 - val_loss: 0.3115 - val_acc: 0.9610\n",
      "Epoch 32/1000\n",
      "7500/7500 [==============================] - 4292s 572ms/step - loss: 0.2721 - acc: 0.9650 - val_loss: 0.3042 - val_acc: 0.9620\n",
      "Epoch 33/1000\n",
      "7500/7500 [==============================] - 4285s 571ms/step - loss: 0.2716 - acc: 0.9651 - val_loss: 0.3161 - val_acc: 0.9612\n",
      "Epoch 34/1000\n",
      "7500/7500 [==============================] - 4283s 571ms/step - loss: 0.2715 - acc: 0.9653 - val_loss: 0.3034 - val_acc: 0.9618\n",
      "Epoch 35/1000\n",
      "7500/7500 [==============================] - 4290s 572ms/step - loss: 0.2703 - acc: 0.9654 - val_loss: 0.3356 - val_acc: 0.9581\n",
      "Epoch 36/1000\n",
      "7500/7500 [==============================] - 4294s 573ms/step - loss: 0.2690 - acc: 0.9655 - val_loss: 0.3174 - val_acc: 0.9590\n",
      "Epoch 37/1000\n",
      "7500/7500 [==============================] - 4291s 572ms/step - loss: 0.2676 - acc: 0.9657 - val_loss: 0.2994 - val_acc: 0.9621\n",
      "Epoch 38/1000\n",
      "7500/7500 [==============================] - 4291s 572ms/step - loss: 0.2663 - acc: 0.9658 - val_loss: 0.3696 - val_acc: 0.9532\n",
      "Epoch 39/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2648 - acc: 0.9660 - val_loss: 0.2969 - val_acc: 0.9629\n",
      "Epoch 40/1000\n",
      "7500/7500 [==============================] - 4302s 574ms/step - loss: 0.2635 - acc: 0.9662 - val_loss: 0.3272 - val_acc: 0.9594\n",
      "Epoch 41/1000\n",
      "7500/7500 [==============================] - 4312s 575ms/step - loss: 0.2619 - acc: 0.9664 - val_loss: 0.3226 - val_acc: 0.9603\n",
      "Epoch 42/1000\n",
      "7500/7500 [==============================] - 4311s 575ms/step - loss: 0.2616 - acc: 0.9666 - val_loss: 0.3037 - val_acc: 0.9617\n",
      "Epoch 43/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2589 - acc: 0.9668 - val_loss: 0.2953 - val_acc: 0.9630\n",
      "Epoch 44/1000\n",
      "7500/7500 [==============================] - 4312s 575ms/step - loss: 0.2581 - acc: 0.9669 - val_loss: 0.3416 - val_acc: 0.9566\n",
      "Epoch 45/1000\n",
      "7500/7500 [==============================] - 4304s 574ms/step - loss: 0.2591 - acc: 0.9668 - val_loss: 0.3269 - val_acc: 0.9596\n",
      "Epoch 46/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2574 - acc: 0.9670 - val_loss: 0.3003 - val_acc: 0.9620\n",
      "Epoch 47/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2569 - acc: 0.9671 - val_loss: 0.3206 - val_acc: 0.9589\n",
      "Epoch 48/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2576 - acc: 0.9670 - val_loss: 0.3218 - val_acc: 0.9585\n",
      "Epoch 49/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2552 - acc: 0.9673 - val_loss: 0.2886 - val_acc: 0.9634\n",
      "Epoch 50/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2529 - acc: 0.9675 - val_loss: 0.3044 - val_acc: 0.9623\n",
      "Epoch 51/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2522 - acc: 0.9676 - val_loss: 0.2964 - val_acc: 0.9642\n",
      "Epoch 52/1000\n",
      "7500/7500 [==============================] - 4312s 575ms/step - loss: 0.2521 - acc: 0.9678 - val_loss: 0.3099 - val_acc: 0.9604\n",
      "Epoch 53/1000\n",
      "7500/7500 [==============================] - 4307s 574ms/step - loss: 0.2516 - acc: 0.9677 - val_loss: 0.3071 - val_acc: 0.9608\n",
      "Epoch 54/1000\n",
      "7500/7500 [==============================] - 4307s 574ms/step - loss: 0.2509 - acc: 0.9680 - val_loss: 0.3037 - val_acc: 0.9625\n",
      "Epoch 55/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2488 - acc: 0.9681 - val_loss: 0.3127 - val_acc: 0.9597\n",
      "Epoch 56/1000\n",
      "7500/7500 [==============================] - 4312s 575ms/step - loss: 0.2491 - acc: 0.9682 - val_loss: 0.3293 - val_acc: 0.9596\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 57/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2469 - acc: 0.9684 - val_loss: 0.2975 - val_acc: 0.9623\n",
      "Epoch 58/1000\n",
      "7500/7500 [==============================] - 4302s 574ms/step - loss: 0.2465 - acc: 0.9683 - val_loss: 0.2913 - val_acc: 0.9633\n",
      "Epoch 59/1000\n",
      "7500/7500 [==============================] - 4343s 579ms/step - loss: 0.2460 - acc: 0.9685 - val_loss: 0.3192 - val_acc: 0.9595\n",
      "Epoch 60/1000\n",
      "7500/7500 [==============================] - 4301s 573ms/step - loss: 0.2336 - acc: 0.9700 - val_loss: 0.2887 - val_acc: 0.9637\n",
      "Epoch 61/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2292 - acc: 0.9707 - val_loss: 0.2857 - val_acc: 0.9637\n",
      "Epoch 62/1000\n",
      "7500/7500 [==============================] - 4335s 578ms/step - loss: 0.2269 - acc: 0.9708 - val_loss: 0.2856 - val_acc: 0.9640\n",
      "Epoch 63/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2271 - acc: 0.9709 - val_loss: 0.2915 - val_acc: 0.9629\n",
      "Epoch 64/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2257 - acc: 0.9710 - val_loss: 0.2830 - val_acc: 0.9647\n",
      "Epoch 65/1000\n",
      "7500/7500 [==============================] - 4309s 575ms/step - loss: 0.2260 - acc: 0.9711 - val_loss: 0.2870 - val_acc: 0.9644\n",
      "Epoch 66/1000\n",
      "7500/7500 [==============================] - 4309s 575ms/step - loss: 0.2253 - acc: 0.9711 - val_loss: 0.2808 - val_acc: 0.9647\n",
      "Epoch 67/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2243 - acc: 0.9713 - val_loss: 0.2863 - val_acc: 0.9642\n",
      "Epoch 68/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2240 - acc: 0.9713 - val_loss: 0.2901 - val_acc: 0.9640\n",
      "Epoch 69/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2241 - acc: 0.9713 - val_loss: 0.2830 - val_acc: 0.9648\n",
      "Epoch 70/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2232 - acc: 0.9715 - val_loss: 0.2794 - val_acc: 0.9652\n",
      "Epoch 71/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2228 - acc: 0.9714 - val_loss: 0.2868 - val_acc: 0.9638\n",
      "Epoch 72/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2238 - acc: 0.9714 - val_loss: 0.2868 - val_acc: 0.9640\n",
      "Epoch 73/1000\n",
      "7500/7500 [==============================] - 4303s 574ms/step - loss: 0.2234 - acc: 0.9714 - val_loss: 0.2777 - val_acc: 0.9655\n",
      "Epoch 74/1000\n",
      "7500/7500 [==============================] - 4307s 574ms/step - loss: 0.2228 - acc: 0.9716 - val_loss: 0.2995 - val_acc: 0.9622\n",
      "Epoch 75/1000\n",
      "7500/7500 [==============================] - 4294s 573ms/step - loss: 0.2221 - acc: 0.9717 - val_loss: 0.2874 - val_acc: 0.9637\n",
      "Epoch 76/1000\n",
      "7500/7500 [==============================] - 4295s 573ms/step - loss: 0.2220 - acc: 0.9716 - val_loss: 0.2845 - val_acc: 0.9638\n",
      "Epoch 77/1000\n",
      "7500/7500 [==============================] - 4302s 574ms/step - loss: 0.2214 - acc: 0.9716 - val_loss: 0.2842 - val_acc: 0.9644\n",
      "Epoch 78/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2199 - acc: 0.9718 - val_loss: 0.2843 - val_acc: 0.9643\n",
      "Epoch 79/1000\n",
      "7500/7500 [==============================] - 4395s 586ms/step - loss: 0.2206 - acc: 0.9718 - val_loss: 0.2884 - val_acc: 0.9641\n",
      "Epoch 80/1000\n",
      "7500/7500 [==============================] - 4309s 575ms/step - loss: 0.2232 - acc: 0.9714 - val_loss: 0.2818 - val_acc: 0.9638\n",
      "Epoch 81/1000\n",
      "7500/7500 [==============================] - 4306s 574ms/step - loss: 0.2200 - acc: 0.9718 - val_loss: 0.2853 - val_acc: 0.9649\n",
      "Epoch 82/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2198 - acc: 0.9718 - val_loss: 0.2877 - val_acc: 0.9637\n",
      "Epoch 83/1000\n",
      "7500/7500 [==============================] - 4418s 589ms/step - loss: 0.2197 - acc: 0.9719 - val_loss: 0.2786 - val_acc: 0.9654\n",
      "Epoch 84/1000\n",
      "7500/7500 [==============================] - 4311s 575ms/step - loss: 0.2204 - acc: 0.9718 - val_loss: 0.2890 - val_acc: 0.9639\n",
      "Epoch 85/1000\n",
      "7500/7500 [==============================] - 4311s 575ms/step - loss: 0.2190 - acc: 0.9721 - val_loss: 0.2809 - val_acc: 0.9643\n",
      "Epoch 86/1000\n",
      "7500/7500 [==============================] - 4310s 575ms/step - loss: 0.2180 - acc: 0.9721 - val_loss: 0.2833 - val_acc: 0.9643\n",
      "Epoch 87/1000\n",
      "7500/7500 [==============================] - 4308s 574ms/step - loss: 0.2187 - acc: 0.9719 - val_loss: 0.2861 - val_acc: 0.9641\n",
      "Epoch 88/1000\n",
      "7500/7500 [==============================] - 4318s 576ms/step - loss: 0.2189 - acc: 0.9720 - val_loss: 0.2869 - val_acc: 0.9637\n",
      "Epoch 89/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2184 - acc: 0.9720 - val_loss: 0.2782 - val_acc: 0.9652\n",
      "Epoch 90/1000\n",
      "7500/7500 [==============================] - 4312s 575ms/step - loss: 0.2184 - acc: 0.9721 - val_loss: 0.2802 - val_acc: 0.9647\n",
      "Epoch 91/1000\n",
      "7500/7500 [==============================] - 4309s 574ms/step - loss: 0.2181 - acc: 0.9721 - val_loss: 0.2880 - val_acc: 0.9643\n",
      "Epoch 92/1000\n",
      "7500/7500 [==============================] - 4310s 575ms/step - loss: 0.2175 - acc: 0.9721 - val_loss: 0.2800 - val_acc: 0.9643\n",
      "Epoch 93/1000\n",
      "7500/7500 [==============================] - 4319s 576ms/step - loss: 0.2175 - acc: 0.9721 - val_loss: 0.2855 - val_acc: 0.9648\n",
      "Epoch 94/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2180 - acc: 0.9721 - val_loss: 0.2883 - val_acc: 0.9636\n",
      "Epoch 95/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2181 - acc: 0.9721 - val_loss: 0.2812 - val_acc: 0.9653\n",
      "Epoch 96/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2178 - acc: 0.9721 - val_loss: 0.2828 - val_acc: 0.9646\n",
      "Epoch 97/1000\n",
      "7500/7500 [==============================] - 4305s 574ms/step - loss: 0.2179 - acc: 0.9720 - val_loss: 0.2856 - val_acc: 0.9643\n",
      "Epoch 98/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2181 - acc: 0.9721 - val_loss: 0.2886 - val_acc: 0.9636\n",
      "Epoch 99/1000\n",
      "7500/7500 [==============================] - 4314s 575ms/step - loss: 0.2179 - acc: 0.9720 - val_loss: 0.2758 - val_acc: 0.9656\n",
      "Epoch 100/1000\n",
      "7500/7500 [==============================] - 4315s 575ms/step - loss: 0.2181 - acc: 0.9721 - val_loss: 0.2857 - val_acc: 0.9640\n",
      "Epoch 101/1000\n",
      "5368/7500 [====================>.........] - ETA: 19:03 - loss: 0.2174 - acc: 0.9722"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-6-72b8f7c85d10>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m     25\u001b[0m                               \u001b[0mvalidation_steps\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mval_dataset_size\u001b[0m\u001b[1;33m/\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     26\u001b[0m                               \u001b[0mcallbacks\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcallback_list\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m                               verbose=1)\n\u001b[0m",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\keras\\legacy\\interfaces.py\u001b[0m in \u001b[0;36mwrapper\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m     89\u001b[0m                 warnings.warn('Update your `' + object_name +\n\u001b[0;32m     90\u001b[0m                               '` call to the Keras 2 API: ' + signature, stacklevel=2)\n\u001b[1;32m---> 91\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     92\u001b[0m         \u001b[0mwrapper\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_original_function\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     93\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit_generator\u001b[1;34m(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)\u001b[0m\n\u001b[0;32m   1424\u001b[0m             \u001b[0muse_multiprocessing\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0muse_multiprocessing\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1425\u001b[0m             \u001b[0mshuffle\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mshuffle\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1426\u001b[1;33m             initial_epoch=initial_epoch)\n\u001b[0m\u001b[0;32m   1427\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1428\u001b[0m     \u001b[1;33m@\u001b[0m\u001b[0minterfaces\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlegacy_generator_methods_support\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\keras\\engine\\training_generator.py\u001b[0m in \u001b[0;36mfit_generator\u001b[1;34m(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)\u001b[0m\n\u001b[0;32m    189\u001b[0m                 outs = model.train_on_batch(x, y,\n\u001b[0;32m    190\u001b[0m                                             \u001b[0msample_weight\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msample_weight\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 191\u001b[1;33m                                             class_weight=class_weight)\n\u001b[0m\u001b[0;32m    192\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    193\u001b[0m                 \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mouts\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlist\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mtrain_on_batch\u001b[1;34m(self, x, y, sample_weight, class_weight)\u001b[0m\n\u001b[0;32m   1218\u001b[0m             \u001b[0mins\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0my\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0msample_weights\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1219\u001b[0m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_make_train_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1220\u001b[1;33m         \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mins\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1221\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1222\u001b[0m             \u001b[1;32mreturn\u001b[0m \u001b[0moutputs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, inputs)\u001b[0m\n\u001b[0;32m   2665\u001b[0m                     \u001b[1;34m'In order to feed symbolic tensors to a Keras model '\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2666\u001b[0m                     'in TensorFlow, you need tensorflow 1.8 or higher.')\n\u001b[1;32m-> 2667\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_legacy_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2668\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2669\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36m_legacy_call\u001b[1;34m(self, inputs)\u001b[0m\n\u001b[0;32m   2647\u001b[0m         \u001b[0msession\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mget_session\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2648\u001b[0m         updated = session.run(fetches=fetches, feed_dict=feed_dict,\n\u001b[1;32m-> 2649\u001b[1;33m                               **self.session_kwargs)\n\u001b[0m\u001b[0;32m   2650\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mupdated\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2651\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    903\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    904\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 905\u001b[1;33m                          run_metadata_ptr)\n\u001b[0m\u001b[0;32m    906\u001b[0m       \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    907\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1138\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1139\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[1;32m-> 1140\u001b[1;33m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[0;32m   1141\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1142\u001b[0m       \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1319\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1320\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[1;32m-> 1321\u001b[1;33m                            run_metadata)\n\u001b[0m\u001b[0;32m   1322\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1323\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   1325\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1326\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1327\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1328\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1329\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[1;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m   1310\u001b[0m       \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1311\u001b[0m       return self._call_tf_sessionrun(\n\u001b[1;32m-> 1312\u001b[1;33m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[0;32m   1313\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1314\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Anaconda3\\envs\\tfgpu17\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[1;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[0;32m   1418\u001b[0m         return tf_session.TF_Run(\n\u001b[0;32m   1419\u001b[0m             \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1420\u001b[1;33m             status, run_metadata)\n\u001b[0m\u001b[0;32m   1421\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1422\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Callbacks: stop on stalled *validation* loss, checkpoint the best model,\n",
    "# lower the LR on plateaus, and log to TensorBoard.\n",
    "callback_list = [\n",
    "    # Watch val_loss (not training acc): training metrics keep improving\n",
    "    # even while the model overfits, so they never trigger a useful stop.\n",
    "    # Patience is larger than ReduceLROnPlateau's so an LR reduction gets\n",
    "    # a chance to help before training is abandoned.\n",
    "    EarlyStopping(\n",
    "        monitor='val_loss',\n",
    "        patience=25,\n",
    "    ),\n",
    "    # Keep only the weights with the best validation loss seen so far.\n",
    "    ModelCheckpoint(\n",
    "        filepath='building_seg_vgg16_bcedice_0921.h5',\n",
    "        monitor='val_loss',\n",
    "        save_best_only=True,\n",
    "    ),\n",
    "    # Cut the learning rate 10x when val_loss stalls for 10 epochs.\n",
    "    ReduceLROnPlateau(\n",
    "        monitor='val_loss',\n",
    "        factor=0.1,\n",
    "        patience=10,\n",
    "    ),\n",
    "    TensorBoard(\n",
    "        log_dir = 'logs'\n",
    "    ),\n",
    "]\n",
    "\n",
    "# Train; steps are sized so each epoch covers the whole (augmented) dataset.\n",
    "history = model.fit_generator(make_train_generator(),\n",
    "                              epochs=1000,\n",
    "                              steps_per_epoch=train_dataset_size // batch_size,\n",
    "                              validation_data=make_val_generator(),\n",
    "                              validation_steps=val_dataset_size // batch_size,\n",
    "                              callbacks=callback_list,\n",
    "                              verbose=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'history' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-7-adcbba80ebb5>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0macc\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhistory\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'acc'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m \u001b[0mval_acc\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhistory\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'val_acc'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhistory\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'loss'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mval_loss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mhistory\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'val_loss'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mepochs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0macc\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m+\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'history' is not defined"
     ]
    }
   ],
   "source": [
    "# Plot training curves recorded by fit_generator.\n",
    "acc = history.history['acc']\n",
    "val_acc = history.history['val_acc']\n",
    "loss = history.history['loss']\n",
    "val_loss = history.history['val_loss']\n",
    "epochs = range(1, len(acc) + 1)\n",
    "\n",
    "# Accuracy: dots = training, solid line = validation.\n",
    "plt.plot(epochs, acc, 'bo', label='Training acc')\n",
    "plt.plot(epochs, val_acc, 'b', label='Validation acc')\n",
    "plt.title('Training and validation accuracy')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Accuracy')\n",
    "plt.legend()\n",
    "\n",
    "# Loss on a separate figure so the scales don't fight each other.\n",
    "plt.figure()\n",
    "plt.plot(epochs, loss, 'bo', label='Training loss')\n",
    "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n",
    "plt.title('Training and validation loss')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
