{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "VGG_transfer.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "zyHIYtPI3cqn",
        "colab_type": "code",
        "outputId": "325a86db-77d1-4733-caf4-4483b40f9690",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 218
        }
      },
      "source": [
        "# Transfer learning: frozen ImageNet VGG19 backbone + a new 7-class head\n",
        "from tensorflow import keras \n",
        "from tensorflow.keras.layers import Dense\n",
        "from tensorflow.keras.models import Sequential\n",
        "# include_top=False drops VGG19's FC layers; pooling=\"max\" yields a 512-d feature vector\n",
        "net = keras.applications.VGG19(weights=\"imagenet\",include_top=False,pooling=\"max\")\n",
        "net.trainable=False  # freeze backbone weights; only the new Dense head is trained\n",
        "newnet = Sequential([\n",
        "            net,\n",
        "            Dense(7)  # 7 output logits (no activation; loss is built with from_logits=True)\n",
        "])\n",
        "newnet.build(input_shape=(None,64,64,3))  # inputs: batches of 64x64 RGB images\n",
        "newnet.summary()"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Model: \"sequential_1\"\n",
            "_________________________________________________________________\n",
            "Layer (type)                 Output Shape              Param #   \n",
            "=================================================================\n",
            "vgg19 (Model)                (None, 512)               20024384  \n",
            "_________________________________________________________________\n",
            "dense_1 (Dense)              (None, 7)                 3591      \n",
            "=================================================================\n",
            "Total params: 20,027,975\n",
            "Trainable params: 3,591\n",
            "Non-trainable params: 20,024,384\n",
            "_________________________________________________________________\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YKQPW-OX6KEB",
        "colab_type": "code",
        "outputId": "08ac96d5-0fe6-4bb9-c070-77278ede6b86",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 50
        }
      },
      "source": [
        "import numpy as np\n",
        "import h5py\n",
        "# Load the hand-gesture dataset from the Drive-mounted HDF5 file.\n",
        "# NOTE(review): absolute Colab path — adjust if run outside this Drive account.\n",
        "DATASET_PATH = \"/content/drive/My Drive/Marko/Model_learning/graduated_project/dataset/data/hand_dataset.h5\"\n",
        "# Use a context manager so the HDF5 handle is closed (the original left it open);\n",
        "# np.array() materializes the datasets into memory before the file closes.\n",
        "with h5py.File(DATASET_PATH, \"r\") as data:\n",
        "  image_data = np.array(data[\"data\"])\n",
        "  image_label = np.array(data[\"label\"])\n",
        "# Shuffle images and labels with the same permutation\n",
        "# (unseeded — reruns produce a different order).\n",
        "p = np.random.permutation(len(image_data))\n",
        "image_data = image_data[p]\n",
        "image_label = image_label[p]\n",
        "print(image_data.shape)\n",
        "print(image_label.shape)\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "(10195, 400, 300, 3)\n",
            "(10195,)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8Jq5wMAiX_QZ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import tensorflow as tf\n",
        "# ImageNet per-channel statistics, matching the pretrained VGG19 backbone\n",
        "mean = tf.constant([0.485,0.456,0.406])\n",
        "std = tf.constant([0.229,0.224,0.225])\n",
        "\n",
        "def normalize(x):\n",
        "  \"\"\"Standardize an image in [0,1] with the ImageNet mean/std per channel.\"\"\"\n",
        "  x = (x - mean) / std\n",
        "  return x\n",
        "\n",
        "def denormalize(x):\n",
        "  \"\"\"Invert normalize(); handy for visualizing augmented samples.\"\"\"\n",
        "  x = x * std + mean\n",
        "  return x \n",
        "\n",
        "def preprocess(x,y,augment=True):\n",
        "  \"\"\"Resize to 64x64, optionally augment, scale to [0,1], normalize; one-hot the label (7 classes).\"\"\"\n",
        "  x = tf.image.resize(x,[64,64])\n",
        "  # Fix: scale to [0,1] BEFORE augmenting — random_brightness's max_delta=32/255\n",
        "  # is in [0,1] units, so applying it to 0-255 pixels was a near no-op.\n",
        "  x = tf.cast(x,dtype=tf.float32)/255.\n",
        "  if augment:  # pass augment=False for validation/test pipelines\n",
        "    x = tf.image.random_flip_left_right(x)\n",
        "    x = tf.image.random_brightness(x, max_delta = 32./255.)\n",
        "    x = tf.image.random_contrast(x, lower=0.5, upper=1.5)\n",
        "    x = tf.image.random_saturation(x, lower=0.5, upper=1.5)\n",
        "  x = normalize(x)\n",
        "  y = tf.convert_to_tensor(y)\n",
        "  y = tf.one_hot(y,depth=7)\n",
        "  return x,y"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "CaEfk_aKYKGR",
        "colab_type": "code",
        "outputId": "d8dc0649-ffee-4052-da9a-89962fe447d7",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 118
        }
      },
      "source": [
        "from sklearn.model_selection import train_test_split\n",
        "# Split: ~8% held-out test, then 10% of the remainder as validation\n",
        "# (fixed random_state so the splits are reproducible).\n",
        "X, x_test, Y, y_test = train_test_split(image_data, image_label, test_size=0.08, random_state=666)\n",
        "x_train, x_valid, y_train, y_valid = train_test_split(X, Y, test_size=0.1, random_state=666)\n",
        "print(x_train.shape)\n",
        "print(x_valid.shape)\n",
        "print(x_test.shape)\n",
        "batch_sz = 32\n",
        "\n",
        "db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n",
        "# NOTE(review): shuffle buffer (1000) is much smaller than the training set\n",
        "# (~8.4k samples), so shuffling is only approximate.\n",
        "db_train = db_train.shuffle(1000).map(preprocess).batch(batch_sz)\n",
        "\n",
        "db_valid = tf.data.Dataset.from_tensor_slices((x_valid, y_valid))\n",
        "# FIXME: preprocess() applies random augmentation here too — validation and\n",
        "# test metrics are computed on augmented images.\n",
        "db_valid = db_valid.map(preprocess).batch(batch_sz)\n",
        "\n",
        "db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n",
        "db_test = db_test.map(preprocess).batch(batch_sz)\n",
        "\n",
        "print(db_train)\n",
        "print(db_valid)\n",
        "print(db_test)\n"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "(8441, 400, 300, 3)\n",
            "(938, 400, 300, 3)\n",
            "(816, 400, 300, 3)\n",
            "<BatchDataset shapes: ((None, 64, 64, 3), (None, 7)), types: (tf.float32, tf.float32)>\n",
            "<BatchDataset shapes: ((None, 64, 64, 3), (None, 7)), types: (tf.float32, tf.float32)>\n",
            "<BatchDataset shapes: ((None, 64, 64, 3), (None, 7)), types: (tf.float32, tf.float32)>\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "F3eAT73pYjJV",
        "colab_type": "code",
        "outputId": "15e078f7-adc2-4ed2-8318-43ac93e7ce96",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "from tensorflow.keras import optimizers\n",
        "from tensorflow.keras import losses\n",
        "from tensorflow.keras.callbacks import EarlyStopping\n",
        "# Train only the Dense head (backbone frozen). Model emits logits, hence from_logits=True.\n",
        "newnet.compile(optimizer=optimizers.Adam(learning_rate=0.0001),  # 'lr' kwarg is deprecated\n",
        "        loss = losses.CategoricalCrossentropy(from_logits=True),\n",
        "        metrics=[\"accuracy\"])\n",
        "# EarlyStopping was imported but never instantiated, and the commented-out\n",
        "# callbacks line referenced an undefined name — define and enable it so the\n",
        "# 500-epoch budget is a ceiling, not a fixed cost.\n",
        "early_stopping = EarlyStopping(monitor=\"val_loss\", patience=10, restore_best_weights=True)\n",
        "newnet.fit(db_train, \n",
        "      validation_data=db_valid,\n",
        "      validation_freq=1,\n",
        "      epochs=500,\n",
        "      callbacks=[early_stopping])"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Epoch 1/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 1.7617 - accuracy: 0.1547 - val_loss: 1.5863 - val_accuracy: 0.2143\n",
            "Epoch 2/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 1.4668 - accuracy: 0.2648 - val_loss: 1.4127 - val_accuracy: 0.3006\n",
            "Epoch 3/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 1.3188 - accuracy: 0.3434 - val_loss: 1.3058 - val_accuracy: 0.3742\n",
            "Epoch 4/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 1.2102 - accuracy: 0.4138 - val_loss: 1.2014 - val_accuracy: 0.4222\n",
            "Epoch 5/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 1.1301 - accuracy: 0.4497 - val_loss: 1.1451 - val_accuracy: 0.4510\n",
            "Epoch 6/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 1.0697 - accuracy: 0.4830 - val_loss: 1.0776 - val_accuracy: 0.4989\n",
            "Epoch 7/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 1.0204 - accuracy: 0.5042 - val_loss: 1.0245 - val_accuracy: 0.5128\n",
            "Epoch 8/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.9809 - accuracy: 0.5246 - val_loss: 1.0082 - val_accuracy: 0.5000\n",
            "Epoch 9/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.9443 - accuracy: 0.5328 - val_loss: 0.9725 - val_accuracy: 0.5299\n",
            "Epoch 10/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.9165 - accuracy: 0.5403 - val_loss: 0.9385 - val_accuracy: 0.5416\n",
            "Epoch 11/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.8867 - accuracy: 0.5557 - val_loss: 0.9106 - val_accuracy: 0.5576\n",
            "Epoch 12/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.8614 - accuracy: 0.5660 - val_loss: 0.8963 - val_accuracy: 0.5565\n",
            "Epoch 13/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.8383 - accuracy: 0.5746 - val_loss: 0.8760 - val_accuracy: 0.5768\n",
            "Epoch 14/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.8172 - accuracy: 0.5857 - val_loss: 0.8756 - val_accuracy: 0.5714\n",
            "Epoch 15/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.8031 - accuracy: 0.5878 - val_loss: 0.8633 - val_accuracy: 0.5618\n",
            "Epoch 16/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7903 - accuracy: 0.5968 - val_loss: 0.8329 - val_accuracy: 0.5853\n",
            "Epoch 17/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7734 - accuracy: 0.5957 - val_loss: 0.8178 - val_accuracy: 0.5938\n",
            "Epoch 18/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7617 - accuracy: 0.6027 - val_loss: 0.7997 - val_accuracy: 0.5853\n",
            "Epoch 19/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7455 - accuracy: 0.6085 - val_loss: 0.8131 - val_accuracy: 0.5821\n",
            "Epoch 20/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7355 - accuracy: 0.6138 - val_loss: 0.7854 - val_accuracy: 0.6013\n",
            "Epoch 21/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7307 - accuracy: 0.6125 - val_loss: 0.7800 - val_accuracy: 0.5928\n",
            "Epoch 22/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7133 - accuracy: 0.6171 - val_loss: 0.7717 - val_accuracy: 0.5970\n",
            "Epoch 23/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.7040 - accuracy: 0.6217 - val_loss: 0.7623 - val_accuracy: 0.6023\n",
            "Epoch 24/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.6994 - accuracy: 0.6241 - val_loss: 0.7519 - val_accuracy: 0.6098\n",
            "Epoch 25/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.6862 - accuracy: 0.6288 - val_loss: 0.7250 - val_accuracy: 0.6269\n",
            "Epoch 26/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.6822 - accuracy: 0.6290 - val_loss: 0.7475 - val_accuracy: 0.6077\n",
            "Epoch 27/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.6765 - accuracy: 0.6308 - val_loss: 0.7324 - val_accuracy: 0.6173\n",
            "Epoch 28/500\n",
            "264/264 [==============================] - 6s 23ms/step - loss: 0.6664 - accuracy: 0.6361 - val_loss: 0.7191 - val_accuracy: 0.6045\n",
            "Epoch 29/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.6600 - accuracy: 0.6388 - val_loss: 0.7196 - val_accuracy: 0.6215\n",
            "Epoch 30/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.6592 - accuracy: 0.6388 - val_loss: 0.7055 - val_accuracy: 0.6215\n",
            "Epoch 31/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.6473 - accuracy: 0.6410 - val_loss: 0.7069 - val_accuracy: 0.6194\n",
            "Epoch 32/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.6416 - accuracy: 0.6382 - val_loss: 0.7048 - val_accuracy: 0.6290\n",
            "Epoch 33/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.6394 - accuracy: 0.6472 - val_loss: 0.6745 - val_accuracy: 0.6471\n",
            "Epoch 34/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.6371 - accuracy: 0.6478 - val_loss: 0.6894 - val_accuracy: 0.6237\n",
            "Epoch 35/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.6270 - accuracy: 0.6499 - val_loss: 0.6819 - val_accuracy: 0.6386\n",
            "Epoch 36/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.6165 - accuracy: 0.6518 - val_loss: 0.6783 - val_accuracy: 0.6322\n",
            "Epoch 37/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.6158 - accuracy: 0.6480 - val_loss: 0.6851 - val_accuracy: 0.6322\n",
            "Epoch 38/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.6119 - accuracy: 0.6556 - val_loss: 0.6629 - val_accuracy: 0.6397\n",
            "Epoch 39/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.6111 - accuracy: 0.6521 - val_loss: 0.6535 - val_accuracy: 0.6461\n",
            "Epoch 40/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.6015 - accuracy: 0.6600 - val_loss: 0.6423 - val_accuracy: 0.6450\n",
            "Epoch 41/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5996 - accuracy: 0.6550 - val_loss: 0.6653 - val_accuracy: 0.6375\n",
            "Epoch 42/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.6004 - accuracy: 0.6536 - val_loss: 0.6601 - val_accuracy: 0.6461\n",
            "Epoch 43/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.5955 - accuracy: 0.6622 - val_loss: 0.6620 - val_accuracy: 0.6354\n",
            "Epoch 44/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5914 - accuracy: 0.6592 - val_loss: 0.6474 - val_accuracy: 0.6418\n",
            "Epoch 45/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5847 - accuracy: 0.6598 - val_loss: 0.6480 - val_accuracy: 0.6525\n",
            "Epoch 46/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5777 - accuracy: 0.6658 - val_loss: 0.6350 - val_accuracy: 0.6727\n",
            "Epoch 47/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5835 - accuracy: 0.6614 - val_loss: 0.6497 - val_accuracy: 0.6535\n",
            "Epoch 48/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5747 - accuracy: 0.6633 - val_loss: 0.6463 - val_accuracy: 0.6450\n",
            "Epoch 49/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5792 - accuracy: 0.6625 - val_loss: 0.6646 - val_accuracy: 0.6450\n",
            "Epoch 50/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5708 - accuracy: 0.6658 - val_loss: 0.6409 - val_accuracy: 0.6439\n",
            "Epoch 51/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5669 - accuracy: 0.6656 - val_loss: 0.6434 - val_accuracy: 0.6567\n",
            "Epoch 52/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5607 - accuracy: 0.6718 - val_loss: 0.6205 - val_accuracy: 0.6503\n",
            "Epoch 53/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5633 - accuracy: 0.6699 - val_loss: 0.6183 - val_accuracy: 0.6557\n",
            "Epoch 54/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5543 - accuracy: 0.6698 - val_loss: 0.6186 - val_accuracy: 0.6620\n",
            "Epoch 55/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5535 - accuracy: 0.6744 - val_loss: 0.6188 - val_accuracy: 0.6578\n",
            "Epoch 56/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5507 - accuracy: 0.6767 - val_loss: 0.6131 - val_accuracy: 0.6663\n",
            "Epoch 57/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5524 - accuracy: 0.6737 - val_loss: 0.6126 - val_accuracy: 0.6525\n",
            "Epoch 58/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5512 - accuracy: 0.6765 - val_loss: 0.6087 - val_accuracy: 0.6535\n",
            "Epoch 59/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5449 - accuracy: 0.6811 - val_loss: 0.5851 - val_accuracy: 0.6738\n",
            "Epoch 60/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5398 - accuracy: 0.6803 - val_loss: 0.5955 - val_accuracy: 0.6706\n",
            "Epoch 61/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5395 - accuracy: 0.6798 - val_loss: 0.6196 - val_accuracy: 0.6429\n",
            "Epoch 62/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5391 - accuracy: 0.6779 - val_loss: 0.6054 - val_accuracy: 0.6780\n",
            "Epoch 63/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5386 - accuracy: 0.6831 - val_loss: 0.5911 - val_accuracy: 0.6695\n",
            "Epoch 64/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5348 - accuracy: 0.6823 - val_loss: 0.6199 - val_accuracy: 0.6525\n",
            "Epoch 65/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5331 - accuracy: 0.6833 - val_loss: 0.6045 - val_accuracy: 0.6620\n",
            "Epoch 66/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5259 - accuracy: 0.6874 - val_loss: 0.5892 - val_accuracy: 0.6695\n",
            "Epoch 67/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5337 - accuracy: 0.6845 - val_loss: 0.5830 - val_accuracy: 0.6770\n",
            "Epoch 68/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5277 - accuracy: 0.6861 - val_loss: 0.6116 - val_accuracy: 0.6642\n",
            "Epoch 69/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5241 - accuracy: 0.6858 - val_loss: 0.5919 - val_accuracy: 0.6610\n",
            "Epoch 70/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5306 - accuracy: 0.6814 - val_loss: 0.5891 - val_accuracy: 0.6663\n",
            "Epoch 71/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5214 - accuracy: 0.6895 - val_loss: 0.6021 - val_accuracy: 0.6684\n",
            "Epoch 72/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5233 - accuracy: 0.6889 - val_loss: 0.5732 - val_accuracy: 0.6834\n",
            "Epoch 73/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5210 - accuracy: 0.6895 - val_loss: 0.5952 - val_accuracy: 0.6706\n",
            "Epoch 74/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5178 - accuracy: 0.6878 - val_loss: 0.5604 - val_accuracy: 0.7090\n",
            "Epoch 75/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5098 - accuracy: 0.6948 - val_loss: 0.5705 - val_accuracy: 0.6823\n",
            "Epoch 76/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5161 - accuracy: 0.6919 - val_loss: 0.5740 - val_accuracy: 0.6812\n",
            "Epoch 77/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5138 - accuracy: 0.6914 - val_loss: 0.5735 - val_accuracy: 0.6834\n",
            "Epoch 78/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.5105 - accuracy: 0.6920 - val_loss: 0.5817 - val_accuracy: 0.6706\n",
            "Epoch 79/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5095 - accuracy: 0.6958 - val_loss: 0.5837 - val_accuracy: 0.6748\n",
            "Epoch 80/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5082 - accuracy: 0.6903 - val_loss: 0.5617 - val_accuracy: 0.6802\n",
            "Epoch 81/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5079 - accuracy: 0.6906 - val_loss: 0.5538 - val_accuracy: 0.6930\n",
            "Epoch 82/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5026 - accuracy: 0.6992 - val_loss: 0.5705 - val_accuracy: 0.6759\n",
            "Epoch 83/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5051 - accuracy: 0.6927 - val_loss: 0.5610 - val_accuracy: 0.6898\n",
            "Epoch 84/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.5021 - accuracy: 0.6983 - val_loss: 0.5707 - val_accuracy: 0.6791\n",
            "Epoch 85/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.5051 - accuracy: 0.6958 - val_loss: 0.5635 - val_accuracy: 0.6855\n",
            "Epoch 86/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.5041 - accuracy: 0.6933 - val_loss: 0.5792 - val_accuracy: 0.6738\n",
            "Epoch 87/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4943 - accuracy: 0.7020 - val_loss: 0.5610 - val_accuracy: 0.6855\n",
            "Epoch 88/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4992 - accuracy: 0.6967 - val_loss: 0.5574 - val_accuracy: 0.6748\n",
            "Epoch 89/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4982 - accuracy: 0.6989 - val_loss: 0.5689 - val_accuracy: 0.6802\n",
            "Epoch 90/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4918 - accuracy: 0.7000 - val_loss: 0.5728 - val_accuracy: 0.6908\n",
            "Epoch 91/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4893 - accuracy: 0.7003 - val_loss: 0.5674 - val_accuracy: 0.6855\n",
            "Epoch 92/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4916 - accuracy: 0.6962 - val_loss: 0.5460 - val_accuracy: 0.6994\n",
            "Epoch 93/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4907 - accuracy: 0.6987 - val_loss: 0.5468 - val_accuracy: 0.6908\n",
            "Epoch 94/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4867 - accuracy: 0.7029 - val_loss: 0.5542 - val_accuracy: 0.6834\n",
            "Epoch 95/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4858 - accuracy: 0.6991 - val_loss: 0.5528 - val_accuracy: 0.6908\n",
            "Epoch 96/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4775 - accuracy: 0.7070 - val_loss: 0.5530 - val_accuracy: 0.6876\n",
            "Epoch 97/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4799 - accuracy: 0.7094 - val_loss: 0.5385 - val_accuracy: 0.6898\n",
            "Epoch 98/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4841 - accuracy: 0.7063 - val_loss: 0.5585 - val_accuracy: 0.6898\n",
            "Epoch 99/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4818 - accuracy: 0.7036 - val_loss: 0.5467 - val_accuracy: 0.6791\n",
            "Epoch 100/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4837 - accuracy: 0.7029 - val_loss: 0.5549 - val_accuracy: 0.6940\n",
            "Epoch 101/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4749 - accuracy: 0.7060 - val_loss: 0.5463 - val_accuracy: 0.6940\n",
            "Epoch 102/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4806 - accuracy: 0.7050 - val_loss: 0.5517 - val_accuracy: 0.6898\n",
            "Epoch 103/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4782 - accuracy: 0.7019 - val_loss: 0.5463 - val_accuracy: 0.6930\n",
            "Epoch 104/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4773 - accuracy: 0.7009 - val_loss: 0.5400 - val_accuracy: 0.6962\n",
            "Epoch 105/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4714 - accuracy: 0.7081 - val_loss: 0.5482 - val_accuracy: 0.6983\n",
            "Epoch 106/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4728 - accuracy: 0.7074 - val_loss: 0.5456 - val_accuracy: 0.6908\n",
            "Epoch 107/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4749 - accuracy: 0.7060 - val_loss: 0.5460 - val_accuracy: 0.6834\n",
            "Epoch 108/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4745 - accuracy: 0.7030 - val_loss: 0.5387 - val_accuracy: 0.6908\n",
            "Epoch 109/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4738 - accuracy: 0.7066 - val_loss: 0.5559 - val_accuracy: 0.6770\n",
            "Epoch 110/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4700 - accuracy: 0.7073 - val_loss: 0.5352 - val_accuracy: 0.6876\n",
            "Epoch 111/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4750 - accuracy: 0.7069 - val_loss: 0.5475 - val_accuracy: 0.6983\n",
            "Epoch 112/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4674 - accuracy: 0.7089 - val_loss: 0.5345 - val_accuracy: 0.7090\n",
            "Epoch 113/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4647 - accuracy: 0.7090 - val_loss: 0.5412 - val_accuracy: 0.6908\n",
            "Epoch 114/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4635 - accuracy: 0.7096 - val_loss: 0.5363 - val_accuracy: 0.6898\n",
            "Epoch 115/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4640 - accuracy: 0.7109 - val_loss: 0.5142 - val_accuracy: 0.6951\n",
            "Epoch 116/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4709 - accuracy: 0.7035 - val_loss: 0.5463 - val_accuracy: 0.6940\n",
            "Epoch 117/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4620 - accuracy: 0.7075 - val_loss: 0.5324 - val_accuracy: 0.6962\n",
            "Epoch 118/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4597 - accuracy: 0.7105 - val_loss: 0.5262 - val_accuracy: 0.6951\n",
            "Epoch 119/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4638 - accuracy: 0.7090 - val_loss: 0.5410 - val_accuracy: 0.6834\n",
            "Epoch 120/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4548 - accuracy: 0.7159 - val_loss: 0.5431 - val_accuracy: 0.6930\n",
            "Epoch 121/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4646 - accuracy: 0.7036 - val_loss: 0.5305 - val_accuracy: 0.6951\n",
            "Epoch 122/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4648 - accuracy: 0.7067 - val_loss: 0.5310 - val_accuracy: 0.6855\n",
            "Epoch 123/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4598 - accuracy: 0.7108 - val_loss: 0.5056 - val_accuracy: 0.7026\n",
            "Epoch 124/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4585 - accuracy: 0.7121 - val_loss: 0.5302 - val_accuracy: 0.6951\n",
            "Epoch 125/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4578 - accuracy: 0.7127 - val_loss: 0.5231 - val_accuracy: 0.6876\n",
            "Epoch 126/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4586 - accuracy: 0.7116 - val_loss: 0.5226 - val_accuracy: 0.6940\n",
            "Epoch 127/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4573 - accuracy: 0.7107 - val_loss: 0.5358 - val_accuracy: 0.6844\n",
            "Epoch 128/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.4541 - accuracy: 0.7096 - val_loss: 0.5211 - val_accuracy: 0.6898\n",
            "Epoch 129/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.4518 - accuracy: 0.7167 - val_loss: 0.5200 - val_accuracy: 0.6962\n",
            "Epoch 130/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4535 - accuracy: 0.7141 - val_loss: 0.5268 - val_accuracy: 0.6962\n",
            "Epoch 131/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4605 - accuracy: 0.7103 - val_loss: 0.5163 - val_accuracy: 0.6940\n",
            "Epoch 132/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4509 - accuracy: 0.7188 - val_loss: 0.5210 - val_accuracy: 0.6962\n",
            "Epoch 133/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4488 - accuracy: 0.7170 - val_loss: 0.5237 - val_accuracy: 0.7015\n",
            "Epoch 134/500\n",
            "264/264 [==============================] - 6s 22ms/step - loss: 0.4549 - accuracy: 0.7145 - val_loss: 0.5289 - val_accuracy: 0.6940\n",
            "Epoch 135/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4516 - accuracy: 0.7146 - val_loss: 0.5358 - val_accuracy: 0.6834\n",
            "Epoch 136/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4451 - accuracy: 0.7197 - val_loss: 0.5032 - val_accuracy: 0.7154\n",
            "Epoch 137/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4582 - accuracy: 0.7095 - val_loss: 0.5420 - val_accuracy: 0.6951\n",
            "Epoch 138/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4455 - accuracy: 0.7148 - val_loss: 0.5138 - val_accuracy: 0.6983\n",
            "Epoch 139/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4454 - accuracy: 0.7171 - val_loss: 0.5056 - val_accuracy: 0.6994\n",
            "Epoch 140/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4461 - accuracy: 0.7137 - val_loss: 0.5348 - val_accuracy: 0.6962\n",
            "Epoch 141/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4463 - accuracy: 0.7165 - val_loss: 0.5131 - val_accuracy: 0.7047\n",
            "Epoch 142/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4447 - accuracy: 0.7199 - val_loss: 0.5114 - val_accuracy: 0.7026\n",
            "Epoch 143/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4474 - accuracy: 0.7106 - val_loss: 0.5116 - val_accuracy: 0.6940\n",
            "Epoch 144/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4416 - accuracy: 0.7159 - val_loss: 0.5303 - val_accuracy: 0.6972\n",
            "Epoch 145/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4488 - accuracy: 0.7105 - val_loss: 0.5204 - val_accuracy: 0.6802\n",
            "Epoch 146/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.4407 - accuracy: 0.7183 - val_loss: 0.5273 - val_accuracy: 0.6887\n",
            "Epoch 147/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4401 - accuracy: 0.7166 - val_loss: 0.5129 - val_accuracy: 0.6962\n",
            "Epoch 148/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4412 - accuracy: 0.7214 - val_loss: 0.5115 - val_accuracy: 0.6844\n",
            "Epoch 149/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4430 - accuracy: 0.7127 - val_loss: 0.4950 - val_accuracy: 0.7015\n",
            "Epoch 150/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4449 - accuracy: 0.7148 - val_loss: 0.5425 - val_accuracy: 0.6834\n",
            "Epoch 151/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4334 - accuracy: 0.7203 - val_loss: 0.4984 - val_accuracy: 0.6940\n",
            "Epoch 152/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4431 - accuracy: 0.7176 - val_loss: 0.4971 - val_accuracy: 0.7079\n",
            "Epoch 153/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4418 - accuracy: 0.7121 - val_loss: 0.5027 - val_accuracy: 0.7004\n",
            "Epoch 154/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4340 - accuracy: 0.7210 - val_loss: 0.5250 - val_accuracy: 0.6930\n",
            "Epoch 155/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4339 - accuracy: 0.7220 - val_loss: 0.5293 - val_accuracy: 0.6855\n",
            "Epoch 156/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4365 - accuracy: 0.7192 - val_loss: 0.5293 - val_accuracy: 0.6855\n",
            "Epoch 157/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4368 - accuracy: 0.7182 - val_loss: 0.5137 - val_accuracy: 0.6898\n",
            "Epoch 158/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4353 - accuracy: 0.7201 - val_loss: 0.5117 - val_accuracy: 0.6983\n",
            "Epoch 159/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.4288 - accuracy: 0.7184 - val_loss: 0.4874 - val_accuracy: 0.6951\n",
            "Epoch 160/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4398 - accuracy: 0.7165 - val_loss: 0.5246 - val_accuracy: 0.6919\n",
            "Epoch 161/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.4387 - accuracy: 0.7153 - val_loss: 0.5026 - val_accuracy: 0.7004\n",
            "Epoch 162/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4318 - accuracy: 0.7220 - val_loss: 0.5139 - val_accuracy: 0.6930\n",
            "Epoch 163/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.4407 - accuracy: 0.7183 - val_loss: 0.5243 - val_accuracy: 0.6834\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "KeyboardInterrupt",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-10-af28ce61f88a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      8\u001b[0m       \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdb_valid\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      9\u001b[0m       \u001b[0mvalidation_freq\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m       epochs=500,)\n\u001b[0m\u001b[1;32m     11\u001b[0m       \u001b[0;31m# callbacks=[early_stopping])\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36m_method_wrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m     64\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_method_wrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     65\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_in_multi_worker_mode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m  \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     67\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     68\u001b[0m     \u001b[0;31m# Running inside `run_distribute_coordinator` already.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m    834\u001b[0m       data_handler._initial_epoch = (  # pylint: disable=protected-access\n\u001b[1;32m    835\u001b[0m           self._maybe_load_initial_epoch_from_ckpt(initial_epoch))\n\u001b[0;32m--> 836\u001b[0;31m       \u001b[0;32mfor\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0miterator\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menumerate_epochs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    837\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreset_metrics\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    838\u001b[0m         \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_epoch_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py\u001b[0m in \u001b[0;36menumerate_epochs\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   1126\u001b[0m         \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1127\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_adapter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshould_recreate_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1128\u001b[0;31m         \u001b[0mdata_iterator\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1129\u001b[0m       \u001b[0;32myield\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata_iterator\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1130\u001b[0m       \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_adapter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_epoch_end\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/dataset_ops.py\u001b[0m in \u001b[0;36m__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    402\u001b[0m     \"\"\"\n\u001b[1;32m    403\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minside_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 404\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0miterator_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOwnedIterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    405\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    406\u001b[0m       raise RuntimeError(\"__iter__() is only supported inside of tf.function \"\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/iterator_ops.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, dataset, components, element_spec)\u001b[0m\n\u001b[1;32m    593\u001b[0m           context.context().device_spec.device_type != \"CPU\"):\n\u001b[1;32m    594\u001b[0m         \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/cpu:0\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 595\u001b[0;31m           \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    596\u001b[0m       \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    597\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/iterator_ops.py\u001b[0m in \u001b[0;36m_create_iterator\u001b[0;34m(self, dataset)\u001b[0m\n\u001b[1;32m    599\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_create_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    600\u001b[0m     \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 601\u001b[0;31m     \u001b[0mdataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_apply_options\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    602\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    603\u001b[0m     \u001b[0;31m# Store dataset reference to ensure that dataset is alive when this iterator\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/dataset_ops.py\u001b[0m in \u001b[0;36m_apply_options\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    373\u001b[0m       \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    374\u001b[0m         dataset = _OptimizeDataset(dataset, graph_rewrites,\n\u001b[0;32m--> 375\u001b[0;31m                                    graph_rewrite_configs)\n\u001b[0m\u001b[1;32m    376\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    377\u001b[0m     \u001b[0;31m# (3) Apply autotune options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/dataset_ops.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, input_dataset, optimizations, optimization_configs)\u001b[0m\n\u001b[1;32m   4363\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_optimizations\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   4364\u001b[0m         \u001b[0moptimization_configs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimization_configs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 4365\u001b[0;31m         **self._flat_structure)\n\u001b[0m\u001b[1;32m   4366\u001b[0m     \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_OptimizeDataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariant_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   4367\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_dataset_ops.py\u001b[0m in \u001b[0;36moptimize_dataset\u001b[0;34m(input_dataset, optimizations, output_types, output_shapes, optimization_configs, name)\u001b[0m\n\u001b[1;32m   3677\u001b[0m         \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mop_callbacks\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizations\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"output_types\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   3678\u001b[0m         \u001b[0moutput_types\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"output_shapes\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutput_shapes\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"optimization_configs\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3679\u001b[0;31m         optimization_configs)\n\u001b[0m\u001b[1;32m   3680\u001b[0m       \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   3681\u001b[0m     \u001b[0;32mexcept\u001b[0m \u001b[0m_core\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_FallbackException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "VclNtUAIYu99",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Save the trained transfer model (frozen VGG19 base + new Dense head)\n",
        "# to Google Drive so it can be reloaded later for fine-tuning.\n",
        "# NOTE(review): hardcoded Colab/Drive absolute path — only valid with\n",
        "# Drive mounted at /content/drive.\n",
        "import os\n",
        "path = \"/content/drive/My Drive/Marko/Model_learning/graduated_project/dataset/weights_and_model\"\n",
        "save_path = os.path.join(path,\"vgg_transfer.h5\") \n",
        "newnet.save(save_path)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mwHMeszcdQar",
        "colab_type": "code",
        "outputId": "d4fab8b9-2d34-4479-b535-7fdd3bf484cf",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Reload the saved checkpoint and continue training from it instead of\n",
        "# starting over (the earlier run was interrupted with KeyboardInterrupt).\n",
        "# NOTE(review): hardcoded Drive path — must match the save cell above.\n",
        "fine_tune = tf.keras.models.load_model(\"/content/drive/My Drive/Marko/Model_learning/graduated_project/dataset/weights_and_model/vgg_transfer.h5\")\n",
        "# `learning_rate` replaces the deprecated `lr` keyword (same value).\n",
        "# from_logits=True because the head Dense layer has no softmax activation.\n",
        "fine_tune.compile(optimizer=optimizers.Adam(learning_rate=0.0001),\n",
        "        loss = losses.CategoricalCrossentropy(from_logits=True),\n",
        "        metrics=[\"accuracy\"])\n",
        "# db_train / db_valid are tf.data pipelines built earlier in the notebook.\n",
        "fine_tune.fit(db_train, \n",
        "      validation_data=db_valid,\n",
        "      validation_freq=1,\n",
        "      epochs=500,)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Epoch 1/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3764 - accuracy: 0.7302 - val_loss: 0.4930 - val_accuracy: 0.6972\n",
            "Epoch 2/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3796 - accuracy: 0.7338 - val_loss: 0.4912 - val_accuracy: 0.7132\n",
            "Epoch 3/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3701 - accuracy: 0.7321 - val_loss: 0.4877 - val_accuracy: 0.7068\n",
            "Epoch 4/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3766 - accuracy: 0.7314 - val_loss: 0.5191 - val_accuracy: 0.6983\n",
            "Epoch 5/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3727 - accuracy: 0.7345 - val_loss: 0.4763 - val_accuracy: 0.6951\n",
            "Epoch 6/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3804 - accuracy: 0.7270 - val_loss: 0.4706 - val_accuracy: 0.7047\n",
            "Epoch 7/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3720 - accuracy: 0.7346 - val_loss: 0.4906 - val_accuracy: 0.7026\n",
            "Epoch 8/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3760 - accuracy: 0.7299 - val_loss: 0.4476 - val_accuracy: 0.7207\n",
            "Epoch 9/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3718 - accuracy: 0.7329 - val_loss: 0.5062 - val_accuracy: 0.7026\n",
            "Epoch 10/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3810 - accuracy: 0.7315 - val_loss: 0.4897 - val_accuracy: 0.6930\n",
            "Epoch 11/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3701 - accuracy: 0.7338 - val_loss: 0.5075 - val_accuracy: 0.7068\n",
            "Epoch 12/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3852 - accuracy: 0.7297 - val_loss: 0.4689 - val_accuracy: 0.7154\n",
            "Epoch 13/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3779 - accuracy: 0.7320 - val_loss: 0.5318 - val_accuracy: 0.6887\n",
            "Epoch 14/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3739 - accuracy: 0.7287 - val_loss: 0.4929 - val_accuracy: 0.7122\n",
            "Epoch 15/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3764 - accuracy: 0.7371 - val_loss: 0.5007 - val_accuracy: 0.6983\n",
            "Epoch 16/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3781 - accuracy: 0.7340 - val_loss: 0.4935 - val_accuracy: 0.7164\n",
            "Epoch 17/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3706 - accuracy: 0.7357 - val_loss: 0.5242 - val_accuracy: 0.6919\n",
            "Epoch 18/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3831 - accuracy: 0.7293 - val_loss: 0.4657 - val_accuracy: 0.7026\n",
            "Epoch 19/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3751 - accuracy: 0.7317 - val_loss: 0.4867 - val_accuracy: 0.7058\n",
            "Epoch 20/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3814 - accuracy: 0.7285 - val_loss: 0.5063 - val_accuracy: 0.6962\n",
            "Epoch 21/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3748 - accuracy: 0.7359 - val_loss: 0.5055 - val_accuracy: 0.6898\n",
            "Epoch 22/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3754 - accuracy: 0.7332 - val_loss: 0.5068 - val_accuracy: 0.7047\n",
            "Epoch 23/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3711 - accuracy: 0.7329 - val_loss: 0.4998 - val_accuracy: 0.7036\n",
            "Epoch 24/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3844 - accuracy: 0.7333 - val_loss: 0.5160 - val_accuracy: 0.7026\n",
            "Epoch 25/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3718 - accuracy: 0.7349 - val_loss: 0.4848 - val_accuracy: 0.7143\n",
            "Epoch 26/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3722 - accuracy: 0.7346 - val_loss: 0.4912 - val_accuracy: 0.6994\n",
            "Epoch 27/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3784 - accuracy: 0.7340 - val_loss: 0.4991 - val_accuracy: 0.6951\n",
            "Epoch 28/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3812 - accuracy: 0.7329 - val_loss: 0.4992 - val_accuracy: 0.7079\n",
            "Epoch 29/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3693 - accuracy: 0.7349 - val_loss: 0.5027 - val_accuracy: 0.6962\n",
            "Epoch 30/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3793 - accuracy: 0.7334 - val_loss: 0.5254 - val_accuracy: 0.6908\n",
            "Epoch 31/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3756 - accuracy: 0.7329 - val_loss: 0.5125 - val_accuracy: 0.7058\n",
            "Epoch 32/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3706 - accuracy: 0.7349 - val_loss: 0.5153 - val_accuracy: 0.6887\n",
            "Epoch 33/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3720 - accuracy: 0.7350 - val_loss: 0.4808 - val_accuracy: 0.6994\n",
            "Epoch 34/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3835 - accuracy: 0.7312 - val_loss: 0.5331 - val_accuracy: 0.6951\n",
            "Epoch 35/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3794 - accuracy: 0.7323 - val_loss: 0.4962 - val_accuracy: 0.7122\n",
            "Epoch 36/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3758 - accuracy: 0.7301 - val_loss: 0.4869 - val_accuracy: 0.7058\n",
            "Epoch 37/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3658 - accuracy: 0.7352 - val_loss: 0.5187 - val_accuracy: 0.6812\n",
            "Epoch 38/500\n",
            "264/264 [==============================] - 6s 21ms/step - loss: 0.3761 - accuracy: 0.7305 - val_loss: 0.4858 - val_accuracy: 0.7004\n",
            "Epoch 39/500\n",
            "264/264 [==============================] - 5s 21ms/step - loss: 0.3702 - accuracy: 0.7343 - val_loss: 0.4954 - val_accuracy: 0.7175\n",
            "Epoch 40/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3771 - accuracy: 0.7297 - val_loss: 0.5184 - val_accuracy: 0.7015\n",
            "Epoch 41/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3706 - accuracy: 0.7343 - val_loss: 0.4809 - val_accuracy: 0.7090\n",
            "Epoch 42/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3737 - accuracy: 0.7334 - val_loss: 0.5020 - val_accuracy: 0.6930\n",
            "Epoch 43/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3781 - accuracy: 0.7283 - val_loss: 0.4836 - val_accuracy: 0.7058\n",
            "Epoch 44/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3714 - accuracy: 0.7355 - val_loss: 0.4931 - val_accuracy: 0.6962\n",
            "Epoch 45/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3787 - accuracy: 0.7340 - val_loss: 0.5193 - val_accuracy: 0.7036\n",
            "Epoch 46/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3746 - accuracy: 0.7364 - val_loss: 0.5028 - val_accuracy: 0.7004\n",
            "Epoch 47/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3733 - accuracy: 0.7321 - val_loss: 0.4939 - val_accuracy: 0.7068\n",
            "Epoch 48/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3792 - accuracy: 0.7276 - val_loss: 0.5103 - val_accuracy: 0.6919\n",
            "Epoch 49/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3753 - accuracy: 0.7298 - val_loss: 0.4786 - val_accuracy: 0.7132\n",
            "Epoch 50/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3734 - accuracy: 0.7342 - val_loss: 0.4991 - val_accuracy: 0.6898\n",
            "Epoch 51/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3769 - accuracy: 0.7321 - val_loss: 0.4939 - val_accuracy: 0.6898\n",
            "Epoch 52/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3763 - accuracy: 0.7372 - val_loss: 0.4975 - val_accuracy: 0.7004\n",
            "Epoch 53/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3808 - accuracy: 0.7293 - val_loss: 0.4939 - val_accuracy: 0.7175\n",
            "Epoch 54/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3914 - accuracy: 0.7257 - val_loss: 0.5124 - val_accuracy: 0.7079\n",
            "Epoch 55/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3765 - accuracy: 0.7292 - val_loss: 0.4691 - val_accuracy: 0.6887\n",
            "Epoch 56/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3798 - accuracy: 0.7314 - val_loss: 0.5081 - val_accuracy: 0.6962\n",
            "Epoch 57/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3773 - accuracy: 0.7312 - val_loss: 0.5186 - val_accuracy: 0.7004\n",
            "Epoch 58/500\n",
            "264/264 [==============================] - 5s 19ms/step - loss: 0.3784 - accuracy: 0.7307 - val_loss: 0.4943 - val_accuracy: 0.7100\n",
            "Epoch 59/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3758 - accuracy: 0.7315 - val_loss: 0.5155 - val_accuracy: 0.6972\n",
            "Epoch 60/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3749 - accuracy: 0.7331 - val_loss: 0.5189 - val_accuracy: 0.6887\n",
            "Epoch 61/500\n",
            "264/264 [==============================] - 5s 20ms/step - loss: 0.3776 - accuracy: 0.7288 - val_loss: 0.5047 - val_accuracy: 0.7068\n",
            "Epoch 62/500\n",
            "133/264 [==============>...............] - ETA: 2s - loss: 0.3894 - accuracy: 0.7375"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "KeyboardInterrupt",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-18-6df38b172afb>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      6\u001b[0m       \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdb_valid\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      7\u001b[0m       \u001b[0mvalidation_freq\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m       epochs=500,)\n\u001b[0m",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36m_method_wrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m     64\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_method_wrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     65\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_in_multi_worker_mode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m  \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     67\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     68\u001b[0m     \u001b[0;31m# Running inside `run_distribute_coordinator` already.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m    846\u001b[0m                 batch_size=batch_size):\n\u001b[1;32m    847\u001b[0m               \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_train_batch_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 848\u001b[0;31m               \u001b[0mtmp_logs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrain_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    849\u001b[0m               \u001b[0;31m# Catch OutOfRangeError for Datasets of unknown size.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    850\u001b[0m               \u001b[0;31m# This blocks until the batch has finished executing.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m    578\u001b[0m         \u001b[0mxla_context\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mExit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    579\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 580\u001b[0;31m       \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    581\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    582\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mtracing_count\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_tracing_count\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m    609\u001b[0m       \u001b[0;31m# In this case we have created variables on the first call, so we run the\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    610\u001b[0m       \u001b[0;31m# defunned version which is guaranteed to never create variables.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 611\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_stateless_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m  \u001b[0;31m# pylint: disable=not-callable\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    612\u001b[0m     \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_stateful_fn\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    613\u001b[0m       \u001b[0;31m# Release the lock early so that multiple threads can perform the call\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   2418\u001b[0m     \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_lock\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2419\u001b[0m       \u001b[0mgraph_function\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_maybe_define_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2420\u001b[0;31m     \u001b[0;32mreturn\u001b[0m \u001b[0mgraph_function\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_filtered_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m  \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   2421\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   2422\u001b[0m   \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36m_filtered_call\u001b[0;34m(self, args, kwargs)\u001b[0m\n\u001b[1;32m   1663\u001b[0m          if isinstance(t, (ops.Tensor,\n\u001b[1;32m   1664\u001b[0m                            resource_variable_ops.BaseResourceVariable))),\n\u001b[0;32m-> 1665\u001b[0;31m         self.captured_inputs)\n\u001b[0m\u001b[1;32m   1666\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1667\u001b[0m   \u001b[0;32mdef\u001b[0m \u001b[0m_call_flat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcaptured_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcancellation_manager\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36m_call_flat\u001b[0;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[1;32m   1744\u001b[0m       \u001b[0;31m# No tape is watching; skip to running the function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1745\u001b[0m       return self._build_call_outputs(self._inference_function.call(\n\u001b[0;32m-> 1746\u001b[0;31m           ctx, args, cancellation_manager=cancellation_manager))\n\u001b[0m\u001b[1;32m   1747\u001b[0m     forward_backward = self._select_forward_and_backward_functions(\n\u001b[1;32m   1748\u001b[0m         \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[1;32m    596\u001b[0m               \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    597\u001b[0m               \u001b[0mattrs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mattrs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 598\u001b[0;31m               ctx=ctx)\n\u001b[0m\u001b[1;32m    599\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    600\u001b[0m           outputs = execute.execute_with_cancellation(\n",
            "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m     58\u001b[0m     \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     59\u001b[0m     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0;32m---> 60\u001b[0;31m                                         inputs, attrs, num_outputs)\n\u001b[0m\u001b[1;32m     61\u001b[0m   \u001b[0;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     62\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6EdTWiUzjWQZ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Persist the fine-tuned model to disk.\n",
        "# NOTE(review): `fine_tune` and `save_path` are defined in earlier cells not\n",
        "# shown here — presumably the fine-tuned VGG model and a Drive path; verify\n",
        "# both exist before running this cell on a fresh kernel.\n",
        "fine_tune.save(save_path)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "NO7HuOMYpiBK",
        "colab_type": "text"
      },
      "source": [
        "## ResNet transfer"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WnIe_IfRphOP",
        "colab_type": "code",
        "outputId": "70719322-17c5-45c9-bcaa-baa97b89ecde",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 218
        }
      },
      "source": [
        "# Load an ImageNet-pretrained ResNet50V2 backbone without the classifier head;\n",
        "# global max pooling yields a (None, 2048) feature vector.\n",
        "resnet = keras.applications.ResNet50V2(weights=\"imagenet\",include_top=False,pooling=\"max\")\n",
        "# Freeze the backbone so only the new Dense head trains.\n",
        "# BUG FIX: this was misspelled `resnet.tranable`, which silently set a new,\n",
        "# meaningless attribute and left all ~23.5M backbone params trainable\n",
        "# (visible in the stale summary output below: \"Trainable params: 23,533,703\").\n",
        "resnet.trainable = False\n",
        "# New classifier: frozen backbone + 7-way linear head (logits, no softmax).\n",
        "newnet2 = Sequential([\n",
        "            resnet,\n",
        "            Dense(7)\n",
        "])\n",
        "newnet2.build(input_shape=(None,64,64,3))\n",
        "newnet2.summary()"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Model: \"sequential_3\"\n",
            "_________________________________________________________________\n",
            "Layer (type)                 Output Shape              Param #   \n",
            "=================================================================\n",
            "resnet50v2 (Model)           (None, 2048)              23564800  \n",
            "_________________________________________________________________\n",
            "dense_3 (Dense)              (None, 7)                 14343     \n",
            "=================================================================\n",
            "Total params: 23,579,143\n",
            "Trainable params: 23,533,703\n",
            "Non-trainable params: 45,440\n",
            "_________________________________________________________________\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8QE_Y9KXp_Ya",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Compile and train the transfer model.\n",
        "# `from_logits=True` matches the head above: Dense(7) emits raw logits.\n",
        "# FIX: use `learning_rate` — the `lr` argument is a deprecated alias in TF 2.x.\n",
        "# NOTE(review): `optimizers`, `losses`, `db_train`, `db_valid` come from\n",
        "# earlier cells not shown in this chunk — confirm they are defined on a\n",
        "# fresh kernel before Run All.\n",
        "newnet2.compile(optimizer=optimizers.Adam(learning_rate=0.0001),\n",
        "        loss = losses.CategoricalCrossentropy(from_logits=True),\n",
        "        metrics=[\"accuracy\"])\n",
        "newnet2.fit(db_train, \n",
        "      validation_data=db_valid,\n",
        "      validation_freq=1,\n",
        "      epochs=500,)"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}