{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "CIFAR10_Full_TPU",
      "provenance": [],
      "collapsed_sections": [],
      "machine_shape": "hm",
      "include_colab_link": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "accelerator": "TPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/sayakpaul/Training-BatchNorm-and-Only-BatchNorm/blob/master/CIFAR10_Full_TPU_Different_LR_Schedules.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zRrLNnbuz0fG",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Authenticate yourself to use the TPUs\n",
        "import os\n",
        "\n",
        "# Outside Colab the flag is absent, so this cell is a no-op there.\n",
        "IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ  # this is always set on Colab, the value is 0 or 1 depending on GPU presence\n",
        "if IS_COLAB_BACKEND:\n",
        "  from google.colab import auth\n",
        "  # Authenticates the Colab machine and also the TPU using your\n",
        "  # credentials so that they can access your private GCS buckets.\n",
        "  auth.authenticate_user()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "lIYdn1woOS1n",
        "outputId": "360424bf-1461-40eb-b836-41537dcc7348",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "# TensorFlow Imports\n",
        "import tensorflow as tf\n",
        "# Record which TF build this run used (the output below shows 2.2.0-rc3).\n",
        "print(tf.__version__)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "2.2.0-rc3\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UpKUNu1Uz3ld",
        "colab_type": "code",
        "outputId": "b1bde8e9-dc24-4c6c-8ace-a7c854af0e89",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 731
        }
      },
      "source": [
        "# Detect hardware\n",
        "try:\n",
        "  tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection\n",
        "except ValueError:\n",
        "  # No TPU attached -- fall back to enumerating logical GPUs.\n",
        "  tpu = None\n",
        "  gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n",
        "    \n",
        "# Select appropriate distribution strategy\n",
        "# NOTE(review): `gpus` is only defined when TPU detection failed; the\n",
        "# elif branches below are reached only in that case, so this is safe.\n",
        "if tpu:\n",
        "  tf.config.experimental_connect_to_cluster(tpu)\n",
        "  tf.tpu.experimental.initialize_tpu_system(tpu)\n",
        "  strategy = tf.distribute.experimental.TPUStrategy(tpu) # Going back and forth between TPU and host is expensive. Better to run 128 batches on the TPU before reporting back.\n",
        "  print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])  \n",
        "elif len(gpus) > 1:\n",
        "  strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus])\n",
        "  print('Running on multiple GPUs ', [gpu.name for gpu in gpus])\n",
        "elif len(gpus) == 1:\n",
        "  strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n",
        "  print('Running on single GPU ', gpus[0].name)\n",
        "else:\n",
        "  strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU\n",
        "  print('Running on CPU')\n",
        "print(\"Number of accelerators: \", strategy.num_replicas_in_sync)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Initializing the TPU system: grpc://10.109.199.10:8470\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Initializing the TPU system: grpc://10.109.199.10:8470\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Clearing out eager caches\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Clearing out eager caches\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Finished initializing TPU system.\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Finished initializing TPU system.\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Found TPU system:\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:Found TPU system:\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Num TPU Cores: 8\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Num TPU Cores: 8\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Num TPU Workers: 1\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Num TPU Workers: 1\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Num TPU Cores Per Worker: 8\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Num TPU Cores Per Worker: 8\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:CPU:0, CPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:CPU:0, CPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:localhost/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:CPU:0, CPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:CPU:0, CPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:0, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:0, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:1, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:1, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:2, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:2, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:3, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:3, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:4, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:4, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:5, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:5, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:6, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:6, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:7, TPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU:7, TPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU_SYSTEM:0, TPU_SYSTEM, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:TPU_SYSTEM:0, TPU_SYSTEM, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "INFO:tensorflow:*** Available Device: _DeviceAttributes(/job:worker/replica:0/task:0/device:XLA_CPU:0, XLA_CPU, 0, 0)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Running on TPU  ['10.109.199.10:8470']\n",
            "Number of accelerators:  8\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "l9tHbooBz95X",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Use %pip (not !pip) so the package is installed into the running\n",
        "# kernel's environment; -q keeps the install log short.\n",
        "%pip install -q wandb\n",
        "import wandb\n",
        "wandb.login()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5-_4ROoYRHJq",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Download the CIFAR-10 ResNet building blocks (stem/learner/classifier)\n",
        "# from the keras-idiomatic-programmer model zoo; imported below as\n",
        "# `resnet_cifar10` and used by get_training_model().\n",
        "!wget https://raw.githubusercontent.com/GoogleCloudPlatform/keras-idiomatic-programmer/master/zoo/resnet/resnet_cifar10.py"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "txX7OoEpROgI",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Other imports\n",
        "# NOTE(review): the visible cells only use `Input` and `Model` from the\n",
        "# two star imports below -- consider importing those names explicitly.\n",
        "from tensorflow.keras.layers import *\n",
        "from tensorflow.keras.models import *\n",
        "from wandb.keras import WandbCallback\n",
        "import matplotlib.pyplot as plt\n",
        "import tensorflow as tf\n",
        "import resnet_cifar10\n",
        "import numpy as np\n",
        "import time\n",
        "\n",
        "# Random seed fixation\n",
        "tf.random.set_seed(666)\n",
        "np.random.seed(666)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1NlIHQ-rRWlX",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def get_training_model(optimizer=\"sgd\"):\n",
        "    \"\"\"Build and compile a ResNet20 classifier for CIFAR10.\n",
        "\n",
        "    Args:\n",
        "        optimizer: Keras optimizer, as an instance or string identifier.\n",
        "\n",
        "    Returns:\n",
        "        A compiled tf.keras Model (sparse categorical cross-entropy loss,\n",
        "        accuracy metric), assembled from the resnet_cifar10 helpers.\n",
        "    \"\"\"\n",
        "    # ResNet20\n",
        "    n = 2\n",
        "    depth =  n * 9 + 2\n",
        "    n_blocks = ((depth - 2) // 9) - 1\n",
        "\n",
        "    # The input tensor\n",
        "    inputs = Input(shape=(32, 32, 3))\n",
        "\n",
        "    # The Stem Convolution Group\n",
        "    x = resnet_cifar10.stem(inputs)\n",
        "\n",
        "    # The learner\n",
        "    x = resnet_cifar10.learner(x, n_blocks)\n",
        "\n",
        "    # The Classifier for 10 classes\n",
        "    outputs = resnet_cifar10.classifier(x, 10)\n",
        "\n",
        "    # Instantiate the Model\n",
        "    model = Model(inputs, outputs)\n",
        "\n",
        "    model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])\n",
        "    \n",
        "    return model"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Jp-VUHKiRqo4",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Load the training set of CIFAR10\n",
        "# (train/test splits as NumPy arrays: uint8 images plus integer labels)\n",
        "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dNhaSmKjR2Vq",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Global batch size: 128 per replica, scaled by the accelerator count.\n",
        "BATCH_SIZE = 128 * strategy.num_replicas_in_sync\n",
        "\n",
        "def normalize(image, label):\n",
        "    # Scale pixels to floats in [0, 1] and cast labels to int32.\n",
        "    image = tf.image.convert_image_dtype(image, tf.float32)\n",
        "    label = tf.cast(label, tf.int32)\n",
        "    return image, label\n",
        "\n",
        "def augment(image,label):\n",
        "    # Train-time augmentation; the test pipeline below only normalizes.\n",
        "    image = tf.image.resize_with_crop_or_pad(image, 40, 40) # Add 8 pixels of padding\n",
        "    image = tf.image.random_crop(image, size=[32, 32, 3]) # Random crop back to 32x32\n",
        "    image = tf.image.random_brightness(image, max_delta=0.5) # Random brightness\n",
        "    image = tf.clip_by_value(image, 0., 1.) # brightness can push pixels outside [0, 1]\n",
        "\n",
        "    return image, label\n",
        "\n",
        "# Shuffled + augmented training pipeline.\n",
        "train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n",
        "train_ds = (\n",
        "    train_ds\n",
        "    .shuffle(1024)\n",
        "    .map(normalize, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
        "    .map(augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
        "    .batch(BATCH_SIZE)\n",
        "    .prefetch(tf.data.experimental.AUTOTUNE)\n",
        ")\n",
        "\n",
        "# Evaluation pipeline: normalization only, no shuffling or augmentation.\n",
        "test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n",
        "test_ds = (\n",
        "    test_ds\n",
        "    .map(normalize, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
        "    .batch(BATCH_SIZE)\n",
        "    .prefetch(tf.data.experimental.AUTOTUNE)\n",
        ")"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Ymnw4EirV8HT",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Variables must be created inside the strategy scope so the model is\n",
        "# replicated across all accelerator replicas.\n",
        "with strategy.scope():\n",
        "    model = get_training_model()\n",
        "model.summary()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "lNCYm8J2WiMh",
        "colab_type": "text"
      },
      "source": [
        "- Total params: 575,114\n",
        "- **Trainable params: 571,114**\n",
        "- Non-trainable params: 4,000\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8Wg4hyGWjhZU",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Constants for the ramp-up / sustain / exponential-decay learning rate\n",
        "# schedule consumed by lrfn below.\n",
        "start_lr = 0.00001  # LR at epoch 0\n",
        "min_lr = 0.00001  # floor that the decay approaches\n",
        "max_lr = 0.00005 * strategy.num_replicas_in_sync  # peak LR scales with replica count\n",
        "rampup_epochs = 5  # epochs of linear warmup\n",
        "sustain_epochs = 0  # epochs held at max_lr before decaying\n",
        "exp_decay = .8  # per-epoch decay factor after warmup"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "aECy9PlIjlEl",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def lrfn(epoch):\n",
        "    \"\"\"Learning rate for `epoch`: linear ramp-up to max_lr, an optional\n",
        "    sustain phase, then exponential decay towards min_lr. Reads the\n",
        "    schedule constants defined in the previous cell.\n",
        "    \"\"\"\n",
        "    if epoch < rampup_epochs:\n",
        "        # Linear warmup from start_lr towards max_lr.\n",
        "        return (max_lr - start_lr)/rampup_epochs * epoch + start_lr\n",
        "    if epoch < rampup_epochs + sustain_epochs:\n",
        "        # Hold at the peak learning rate.\n",
        "        return max_lr\n",
        "    # Exponential decay towards the min_lr floor.\n",
        "    decay_epochs = epoch - rampup_epochs - sustain_epochs\n",
        "    return (max_lr - min_lr) * exp_decay**decay_epochs + min_lr\n",
        "\n",
        "lr_callback = tf.keras.callbacks.LearningRateScheduler(lambda epoch: lrfn(epoch), verbose=True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uEKl2gwUjnZX",
        "colab_type": "code",
        "outputId": "2c0577fd-75b5-4183-c1ea-0bf7be600133",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 282
        }
      },
      "source": [
        "rng = list(range(10))\n",
        "y = [lrfn(x) for x in rng]\n",
        "# Reuse the precomputed schedule instead of evaluating lrfn a second time.\n",
        "plt.plot(rng, y)\n",
        "print(y[0], y[-1])"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "1e-05 0.00016974400000000002\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY0AAAD4CAYAAAAQP7oXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nO3deXxU5fX48c8hCbuELawBghDAsMMEBXfBsmhBK8riAogCFkpdfvWL1mpd2qqtRVFckLCKBqRUUxeoFhVXyIR9i4ZFdghb2BOSnN8fc21DzDIJJHcyc96vF6/XzHOf+9xzhyQn97kn9xFVxRhjjPFHJbcDMMYYU3FY0jDGGOM3SxrGGGP8ZknDGGOM3yxpGGOM8Vu42wGUpfr162tMTIzbYRhjTIWSkpJyUFWjCtoW1EkjJiYGr9frdhjGGFOhiMiPhW2z6SljjDF+s6RhjDHGb5Y0jDHG+M2ShjHGGL9Z0jDGGOM3v5KGiPQTkVQRSRORSQVsryIi853ty0UkJs+2R5z2VBHpW4Ixp4jICX+OYYwxpnwUmzREJAyYCvQH4oBhIhKXr9to4IiqtgYmA885+8YBQ4H2QD/gVREJK25MEfEAdfw5hjHGmPLjz5VGDyBNVbeqahaQCAzK12cQMNt5vRDoLSLitCeqaqaqbgPSnPEKHdNJKH8FHvbzGMaEnFNZ2byzYgcnM7PdDsWEGH+SRlNgZ573u5y2AvuoajaQAdQrYt+ixpwAJKnqXj+PcQ4RGSMiXhHxpqen+3F6xlQsObnKxHdW8ciidfzmnVVk5+S6HZIJIQF1I1xEmgC3Ai+XdgxVnaaqHlX1REUV+FfwxlRof/pwE59uOkCfSxqydPMBnvlwk9shmRDiT9LYDTTL8z7aaSuwj4iEA5HAoSL2Lay9K9AaSBOR7UB1EUkr5hjGhIy5325nxtfbGNkrhukjPIy+oiWzvtnOrK+3uR2aCRH+JI1kIFZEWopIZXw3tpPy9UkCRjivBwNL1beObBIw1Kl8agnEAisKG1NVP1TVRqoao6oxwCnnxndRxzAmJHyWeoAnkjbQu10D/nCjr27k0QGX0OeShjz1wUaWbt7vcoQmFBSbNJz7BxOAJcAmYIGqbhCRp0RkoNMtAajnXBU8CExy9t0ALAA2AouB8aqaU9iYxYRS4DGMCQWb9h5jwryVtGtUiynDuhJWyVcDElZJmDKsC3FNajHh7VVs2JPhcqQm2Ekw/7Lu8XjUnnJrKroDx85w09SvyVHlvfGX0ziy2s/67Hf6qMJ74y+nUWRVFyI1wUJEUlTVU9C2gLoRbow516msbO6Z4+Xo6bMkjIgvMGEANKxVlYQR8Rw/c5bRs5OtFNeUGUsaxgSo3FzlgfmrWb87gylDu9KhaWSR/eOa1OKV4d3YtPcYv01cRU5u8M4iGPdY0jAmQD27eDNLNuznsRvi6BPX0K99rm3XgCd+2Z5PNx3gT1aKa8pAUK/cZ0xF9fbyHUxbtpW7erZg1OUxJdp3RK8Yth08yYyvt9GyfnXu7Fmy/Y0piiUNYwLMsu/T+cP767mmbRSP3xhHaZ6W84cb49h5+BRPJG0gum51rm3boAwiNaHIpqeMCSCp+44zft5KYhvU5JXh3QgPK923qK8UtyvtGtViwryVbNp77AJHakKVJQ1jAkT68UzunpVMtcphzBgZT80q5zcRUKNKOAkjPdSsGs7oWckcOHbmAkVqQpklDWMCwOmsHO6Z4+XwySwSRsTTpHbBpbUl1TiyGgkj4jl6+iyjZ3s5lWWluOb8WNIwxmW5ucqDC1azdtdRXhrahY7RRZfWllSHppFMGdqVDXsyuD9xtZXimvNiScMYlz2/JJWP1+/j9wMu4RftG5XJMfrENeSxG+L498b9PPuxleKa0rPqKWNclLhiB69/sYXbL23O6CtalumxRl0ew/ZDJ3nzy23E1K/B7Ze2KN
PjmeBkScMYl3yddpDH3lvPVW2ieHJg+1KV1paEiPD4jXHsOHyKx9/fQHSd6lzdxtacMSVj01PGuOCH/ccZ91YKraJqMnV411KX1pZUeFglXhnejdgGNRk/byWp+46Xy3FN8LCkYUw5O3gik1GzkqkSHkbCSA8XVY0o1+PXrBLOjJHxVK8cxt2zkjlw3Epxjf8saRhTjs6czeHeOV4OnsgkYYSH6DrVXYmjSW1fKe7hk1ncO9vL6awcV+IwFY8lDWPKSW6u8tC7a1i98ygvDulC52a1XY2nY3QkLw3twtrdGTy4YDW5Vopr/OBX0hCRfiKSKiJpIvKzFfOc5VznO9uXi0hMnm2POO2pItK3uDFFJEFE1ojIWhFZKCI1nfaRIpIuIqudf/ecz4kbU95e+CSVD9fuZVK/dvTr0NjtcAD4RftG/H7AJXy8fh/PLdnsdjimAig2aYhIGDAV6A/EAcNEJC5ft9HAEWc978nAc86+cfjW/24P9ANeFZGwYsZ8QFU7q2onYAe+ZWF/Ml9Vuzj/ppfulI0pfwu8O5n62RaG9WjGmKsudjucc4y+oiW3X9qcN77Yyjsrdrgdjglw/lxp9ADSVHWrqmYBicCgfH0GAbOd1wuB3uKrHxwEJKpqpqpuA9Kc8QodU1WPATj7VwPsmtlUaN9sOciji9ZxZWx9nhrUocxLa0tKRHhyYHuuahPFY++t56sfDrodkglg/iSNpsDOPO93OW0F9lHVbCADqFfEvkWOKSIzgX1AO+DlPP1uyTNt1cyP2I1xVdqBE4ybm0LL+jWYens3IsqptLakwsMqMXV4V1pH1eS+eSn8sN9KcU3BAvIrWFVHAU2ATcAQp/lfQIwzbfUJ/7uyOYeIjBERr4h409PTyyVeYwpy6ITvqbWVwysxY2Q8tcq5tLakLqoaQcJID1XCwxg1K5n045luh2QCkD9JYzeQ97f6aKetwD4iEg5EAoeK2LfYMVU1B9+01S3O+0Oq+tNX8XSge0HBquo0VfWoqicqyv7a1bjjzNkcxsxNYf+xM7x5l4dmdd0prS2p6DrVSRjh4eCJTO6d4+XMWSvFNefyJ2kkA7Ei0lJEKuO7sZ2Ur08SMMJ5PRhYqqrqtA91qqtaArHAisLGFJ/W8N97GgOBzc77vOUmA/FdhRgTcFSVhxeuJeXHI0we0oWuzeu4HVKJdG5WmxeHdGHNrqM8tGCNleKacxT77ClVzRaRCcASIAyYoaobROQpwKuqSUACMFdE0oDD+JIATr8FwEYgGxjvXEFQyJiVgNkiUgsQYA1wnxPKRBEZ6IxzGBh5QT4BYy6wyZ98T9KaPTzcry0DOgZGaW1J9evQmEn92vGXjzfTol51Hu7Xzu2QTIAQ3wVBcPJ4POr1et0Ow4SQf6Ts4qF31zDE04xnb+kYcJVSJaGqPPrPdbyzYifPD+7EbR6rPQkVIpKiqp6CttlTbo25QL7beohJi9bSq1U9nrk58EprS0pEeGpQB3YdOc2ji9YRXbsavVrXdzss47KArJ4ypqLZmn6CsXNTaF63Oq/d3j1gS2tLKiKsElNv70bL+jUY91YKaQdOuB2ScVlwfGUb46LDJ7O4e1YyYZWEmSN7EFk9sEtrS6pW1QhmjIyncnglRs1awaETVoobyixpGHMeMrNzGDvXy56MM7x5V3ea16sYpbUl1axudd68y8OBY5mMmZtipbghzJKGMaWkqkz6xzqStx/hb7d2pnuLum6HVKa6Nq/D5CFdSPnxCL9buNZKcUOUJQ1jSuml//zAP1ft5qHr2zCwcxO3wykXAzo25uF+bfnXmj1M/vR7t8MxLrDqKWNK4b1Vu3nx0x+4pVs0E65r7XY45eq+q1vx48FTvLw0jRb1ajC4e7TbIZlyZEnDmBJase0wDy9cy6Ut6/KXX1Xsv8UoDRHhmZs7sPPIKR5ZtJamtavRs1U9t8My5cSmp4wpge0HTzJ2rpfoOtV4487uVA4PzW+hiLBKvHZ7d5rXrc
7YuV5W7zzqdkimnITmV7wxpXD0lK+0FmDGyHhqV6/sckTuiqwewaxRvhLj29/8jm+22DococCShjF+yMrOZezcFHYdOc20uzzE1K/hdkgBoVnd6iwc14smtasxcmYyn27c73ZIpoxZ0jCmGKrKpEVrWb7tMM8P7kR8THCX1pZUw1pVWTC2J5c0uoixb6Xwz1W73A7JlCFLGsYU45WlaSxauZv7+8RyU9f8i1YagDo1KjPv3svoEVOXB+avYc63290OyZQRSxrGFOH91bt54ZPvublrU37bO9btcAJazSrhzBwVT59LGvL4+xuY+lkawfwU7VBlScOYQqT8eJjfLVxLj5i6Ff4x5+WlakQYr93RjZu7NuWvS1L5y8ebLXEEGfs7DWMK8OOhk9w7J4UmkVV5487uVAkPczukCiMirBIv3NqZi6qGM23ZVo6dPsufbu5IWCVLusHAkoYx+WScOsuoWcnkqjJjZDx1aoR2aW1pVKokPDmwPZHVInh5aRrHz2QzeUiXkP27lmDi1/+giPQTkVQRSRORSQVsryIi853ty0UkJs+2R5z2VBHpW9yYIpIgImtEZK2ILBSRmsUdw5gLJSs7l3FvpbDz8CneuKM7F0fVdDukCktEeOgXbfn9gEv4cN1e7p3j5XSWPR23ois2aYhIGDAV6A/EAcNEJC5ft9HAEVVtDUwGnnP2jcO3Xnh7oB/wqoiEFTPmA6raWVU7ATuACUUdw5gLRVX5/T/X8e3WQzz7q05cerE9GuNCuPeqi3nulo58+UM6dyYsJ+P0WbdDMufBnyuNHkCaqm5V1SwgERiUr88gYLbzeiHQW3x3DQcBiaqaqarbgDRnvELHVNVjAM7+1QAt5hjGXBCvfr6Fd1N2MfG61txiD+G7oIbEN+eV4d1Ys+sow6Z9x0FbyKnC8idpNAV25nm/y2krsI+qZgMZQL0i9i1yTBGZCewD2gEvF3OMc4jIGBHxiog3PT3dj9MzBj5Yu4e/LkllYOcmPHB9G7fDCUoDOjZm+oh4th48wW2vf8vuo6fdDsmUQkDelVLVUUATYBMwpIT7TlNVj6p6oqKiyiQ+E1xW7jjCgwvW4GlRh+cHd7LS2jJ0dZso5o6+lPQTmdz62jdsSbc1xysaf5LGbqBZnvfRTluBfUQkHIgEDhWxb7FjqmoOvmmrW4o5hjGltvPwKe6d7aVRLV9pbdUIK60ta/ExdUkccxmZ2bnc9vq3rN+d4XZIpgT8SRrJQKyItBSRyvhubCfl65MEjHBeDwaWqu8vepKAoU7lU0sgFlhR2Jji0xr+e09jILC5mGMYUyoZp32ltWdzcpkxMp56Nau4HVLIaN8kknfH9aRKeCWGTfuO5O2H3Q7J+KnYpOHcP5gALME3XbRAVTeIyFMiMtDplgDUE5E04EFgkrPvBmABsBFYDIxX1ZzCxgQEmC0i64B1QGPgqaKOYUxpnM3J5dfzUth+8CSv39md1g2stLa8XRxVk3fv60XURVW4M2E5n6cecDsk4wcJ5l/WPR6Per1et8MwAUZVeWTROhKTd/LXwZ241dOs+J1MmTl4IpO7Elbww4HjvDikKzd0aux2SCFPRFJU1VPQtoC8EW5MWXpj2VYSk3cy/tpWljACQP2aVXhnzGV0jq7Nb95ZSeKKHW6HZIpgScOElI/X7eXZjzdzY6fGPHR9W7fDMY7IahHMHX0pV8ZGMWnROqYt2+J2SKYQljRMyFi98yj3z19N1+a1+dutnalkD9ALKNUqh/HmXR5u6NiYP3+0mb8tSbUn5AYge2ChCQm7jpzintleGtSqwpt3eay0NkBVDq/ElGFduahqOK98lsaxM2f54y/bW4IPIJY0TNA7duYsd89KJjM7h8Qxl1LfSmsDWlgl4S+/6kitahFMW7aV42eyeX5wJyLCbGIkEFjSMEEtOyeX8fNWsjX9JLPv7kHrBhe5HZLxg4jwSP92RFaL4K9LUjl+5iyvDO9mV4gBwFK3CVqqyhNJG/
jyh4P86eYOXN66vtshmRIQEcZf25qnB7Xn000HGDUzmROZ2W6HFfIsaZiglfDVNuYt38G4q1sxJL652+GYUrqzZwyTh3RmxfbD3P7mdxw5meV2SCHNkoYJSks27ONPH21iQMdGPNzXSmsrupu7RvP6Hd3ZtO84t73xLfsyzrgdUsiypGGCzrpdGdyfuJpO0bX5+21drPImSFwf15BZo+LZc/Q0t77xDT8eOul2SCHJkoYJKnuOnmb07GTq1qjMdCutDTq9WtXn7Xsv4/iZbAa//i2p+467HVLIsaRhgsaJzGzunpXM6awcZo6KJ+oiK60NRp2b1WbB2J4IcNsb37JqxxG3QwopljRMUMjOyWXC2yv54cAJXr2jG20aWmltMGvT8CIWjutFZLUIbp++nK/TDrodUsiwpGEqPFXlqQ828nlqOk8P6sCVsbZiYyhoXq86C8f1pFmd6oyamczi9fvcDikkWNIwFd7Mr7cz59sfGXPVxQy/1EprQ0mDWlWZP/YyLmlSi3FvpfDXJZvJzsl1O6ygZknDVGifbtzP0x9upG/7hkzq187tcIwLalevTOK9lzHE04ypn21h+JvL2Ztx2u2wgpZfSUNE+olIqoikicjPVsxzlnOd72xfLiIxebY94rSnikjf4sYUkXlO+3oRmSEiEU77NSKSISKrnX+Pn8+Jm4pv/e4MJiauomPTSF4c0tVKa0NYtcphPDe4E5OHdGb9ngwGvPQln9lKgGWi2KQhImHAVKA/EAcME5G4fN1GA0dUtTUwGXjO2TcO3/rf7YF+wKsiElbMmPOAdkBHoBpwT57jfKmqXZx/T2FC1t4MX2lt7WoRTL/LQ7XKVlprfH8EmDThChpcVJVRM5N5brFNV11o/lxp9ADSVHWrqmYBicCgfH0GAbOd1wuB3iIiTnuiqmaq6jYgzRmv0DFV9SN1ACuA6PM7RRNsTmZmM3qWl5OZOcwYFU+DWlXdDskEkNYNavL+hMsZ1qMZr32+haHTvmPPUZuuulD8SRpNgZ153u9y2grso6rZQAZQr4h9ix3TmZa6E1icp7mniKwRkY9FpH1BwYrIGBHxiog3PT3dj9MzFUlOrjLxnVWk7j/OK8O70q5RLbdDMgGoakQYf/lVJ14a2oVNe49xw5Qv+WyzTVddCIF8I/xVYJmqfum8Xwm0UNXOwMvAewXtpKrTVNWjqp6oKCu9DDZPf7CR/2w+wB8Htueatg3cDscEuEFdmpL0mytoWKsqo2Yl85ePN3HWpqvOiz9JYzfQLM/7aKetwD4iEg5EAoeK2LfIMUXkCSAKePCnNlU9pqonnNcfAREiYs+6DiGzv9nOrG+2M/qKltx5WQu3wzEVRKuomrw3/nKGX9qcN77YatNV58mfpJEMxIpISxGpjO/GdlK+PknACOf1YGCpc08iCRjqVFe1BGLx3acodEwRuQfoCwxT1f/+SiAijZz7JIhIDyf2Q6U5aVPxLN28nyf/tYE+lzTk0QGXuB2OqWCqRoTx55s78tLQLmzee4wBU77kP5v2ux1WhVRs0nDuUUwAlgCbgAWqukFEnhKRgU63BKCeiKThuzqY5Oy7AVgAbMR3b2K8quYUNqYz1utAQ+DbfKW1g4H1IrIGmAIMVVt1PiRs3HOM37y9irgmtZgyrAthVlprSmlQl6b86zdX0DiyGqNne/nLRzZdVVISzD93PR6Per1et8Mw52H/sTPcNPVrAN4bfzkNrVLKXABnzubwzIcbeeu7HXRrXpuXh3ejae1qbocVMEQkRVU9BW0L5BvhJsSdyspm9Oxkjp0+S8KIeEsY5oKpGhHGMzd15JXhXfl+/wkGvPQln2606Sp/WNIwAclXWruajXuO8crwbsQ1sdJac+Hd2KkJH/zmCqLrVOOeOV6e+WAjWdk2XVUUSxomIP35o018umk/T/yyPde2s9JaU3Zi6tfgH/f14s7LWjD9q23c9sa37Dpyyu2wApYlDRNw5n73IwlfbWNkrxhG9I
pxOxwTAqpGhPH0TR2YOrwbWw74pqv+vcEetV4QSxomoHyeeoA/Jm2gd7sG/OHG/I84M6Zs3dCpMR9MvILm9aozZm4KT9t01c9Y0jABY/O+Y0x4exVtG17ElGFdrbTWuKJFPd901cheMSR8tY1b3/iWnYdtuuonljRMQDhw7Ax3z0ymRpUwEkZ6qFEl3O2QTAirEh7GHwe257Xbu7H1wAlumPIlS2y6CrCkYQLAqaxs7pnj5ahTWts40urlTWDo37ExH068kpj6NRg7N4Un/7Uh5KerLGkYV+XmKg/MX8363RlMGdqVDk0j3Q7JmHM0r1edd8f1ZGSvGGZ+vZ1bX/8mpKerLGkYVz27eDNLNuznsRvi6BPX0O1wjCnQT9NVr9/Rna0HTzJgypcsXr/X7bBcYUnDuObt5TuYtmwrd/VswajLY9wOx5hi9evQiI8mXsnF9Wsw7q2V/DFpA5nZOW6HVa4saRhXLPs+nT+8v55r2kbx+I1xOA8wNibgNatbnXfH9eLuy1sy65vtDH7tW3YcCp3pKksaptyl7jvO+HkriW1Qk1eGdyM8zL4MTcVSObwSj/8yjjfu7M6Ph05yw5Qv+WhdaExX2XerKVfpxzO5e1Yy1SqHMWNkPDWttNZUYH3bN+LDiVfSqkFNfj1vJU+8vz7op6ssaZhyczorh3vmeDl8MouEEfE0sUdRmyDQrG51Foztyb1XtmT2tz9y45Sv+HZL8K4PZ0nDlIvcXOXBBatZu+soLw3tQsdoK601waNyeCV+f0McM0fGc/psDsPe/I6J76xi/7Ezbod2wfmVNESkn4ikikiaiEwqYHsVEZnvbF8uIjF5tj3itKeKSN/ixhSReU77ehGZISIRTruIyBSn/1oR6XY+J27K1/NLUvl4/T5+P+ASftG+kdvhGFMmrm3XgE8fvJqJvWNZvGEf1/3tc6Yt2xJUqwMWmzREJAyYCvQH4oBhIpL/SXKjgSOq2hqYDDzn7BuHb/3v9kA/4FURCStmzHlAO6AjUA24x2nvj2+N8VhgDPBaaU7YlL/EFTt4/Yst3H5pc0Zf0dLtcIwpU1Ujwnjw+jZ88sBVXHZxPf780Wb6v/Ql36QddDu0C8KfK40eQJqqblXVLCARGJSvzyBgtvN6IdBbfDWUg4BEVc1U1W1AmjNeoWOq6kfqAFYA0XmOMcfZ9B1QW0Qal/K8TTn5Ou0gj723nitj6/PkwPZWWmtCRot6NUgYGc/0uzxkZucwfPpyJry9kn0ZFXvKyp+k0RTYmef9LqetwD6qmg1kAPWK2LfYMZ1pqTuBxSWIwwSQH/YfZ9xbKVwcVYOpt1tprQlNfeIa8skDV3N/n1g+2bif6174nNe/2FJhn2EVyN/FrwLLVPXLkuwkImNExCsi3vT09DIKzRTn4IlMRs1Kpkq4r7S2VtUIt0MyxjVVI8K4v08bPn3wanq1qs+zH2+m/0vL+LoCTln5kzR2A83yvI922grsIyLhQCRwqIh9ixxTRJ4AooAHSxgHqjpNVT2q6omKivLj9MyFduZsDvfO8XLwRCbTR3iIrlPd7ZCMCQjN6lZn+ggPM0Z6OJuj3D59OePnrWRvxmm3Q/ObP0kjGYgVkZYiUhnfje2kfH2SgBHO68HAUueeRBIw1KmuaonvJvaKosYUkXuAvsAwVc3Nd4y7nCqqy4AMVQ2NP8GsQHJzlYfeXcOqHUeZfFsXujSr7XZIxgSc69o15N8PXMWD17fh00376f3CF7z2ecWYsio2aTj3KCYAS4BNwAJV3SAiT4nIQKdbAlBPRNLwXR1McvbdACwANuK7NzFeVXMKG9MZ63WgIfCtiKwWkced9o+Arfhupr8J/Pr8Tt2UhRc+SeXDtXuZ1L8d/TtanYIxhakaEcbE3rF8+uDVXNG6Ps8t3ky/l5bx5Q+BPa0uvguC4OTxeNTr9bodRshY4N3JwwvXMjS+GX/5VUerlDKmBD5LPcCTSRvYfugU/Ts04rEb42jq0lMTRC
RFVT0FbQvkG+GmAvlmy0EeXbSOK1rX5+mbOljCMKaErm3bgMX3X8X/+0UbPks9QJ8XvmDqZ2kB9ywrSxrmvKUdOMG4uSm0rO8rrY2w0lpjSqVqRBgTrvNNWV3dJoq/Lkml34tf8sX3gTNlZd/d5rwcOuF7am1EWCVmjIwnspqV1hpzvqLrVOf1O7sz++4eAIyYsYKxc73sOuL+uh2WNEypnTmbw5i5Kew/doY3R3hoVtdKa425kK5uE8Xi+6/kd33bsuz7g/T5+xe8svQHV6esLGmYUlFVHl64lpQfj/D327rQrXkdt0MyJihVCQ9j/LWt+fShq7m2bQP+9u/v6Tt5GZ+lHnAlHksaplQmf/I9SWv28Lu+bbmhk5XWGlPWmtauxmt3dGfu6B5UqiSMmpnMvXO87DxcvlNWljRMif0jZRdTlqZxmyeaX1/Tyu1wjAkpV8ZGsfi3V/F//drxdZpvymrKf37gzNnymbKypGFK5Luth5i0aC09L67HMzfZ32IY44bK4ZW475pW/Oehq+kT15C/f/I9fV9cxtLN+8v82JY0jN+2pp9g7NwUmtetzut3dKdyuH35GOOmxpHVmDq8G2+NvpTwSsLds7zcM7tsp6zsu9745fDJLO6elUxYJWHmyB5EVrfSWmMCxRWx9fn4t1fxSP92fLPFN2U159vtZXIsSxqmWJnZOYyd62VPxhnevKs7zetZaa0xgaZyeCXGXt2KpQ9dwy/aNyKqZpUyOU54mYxqgoaqMukf60jefoQpw7rSvUVdt0MyxhShUWRVXh7WtczGtysNU6SX/vMD/1y1m4eub8PAzk3cDscY4zJLGqZQ763azYuf/sAt3aKZcF1rt8MxxgQASxqmQCu2HebhhWu5tGVde8y5Mea/LGmYn9l+8CRj53qJrlONN+600lpjzP/YTwNzjqOnfKW1ADNGxlO7emWXIzLGBBK/koaI9BORVBFJE5FJBWyvIiLzne3LRSQmz7ZHnPZUEelb3JgiMsFpUxGpn6f9GhHJcJaAzbsMrLlAsrJzGTs3hV1HTjPtLg8x9Wu4HZIxJsAUW3IrImHAVOB6YBeQLCJJqroxT7fRwBFVbS0iQ4HngCEiEgcMBdoDTYBPRaSNs09hY34NfAB8XkA4XyDK4IYAABAeSURBVKrqjaU4T1MMVWXSorUs33aYF4d0IT7GSmuNMT/nz5VGDyBNVbeqahaQCAzK12cQMNt5vRDoLb47p4OARFXNVNVtQJozXqFjquoqVd1+nudlSuiVpWksWrmb+/vEclPXpm6HY4wJUP4kjabAzjzvdzltBfZR1WwgA6hXxL7+jFmQniKyRkQ+FpH2BXUQkTEi4hURb3p64CyRGMjeX72bFz75npu7NuW3vWPdDscYE8Aq0o3wlUALVe0MvAy8V1AnVZ2mqh5V9URFRZVrgBVRyo+H+d3CtfSIqcuzt1hprTGmaP4kjd1Aszzvo522AvuISDgQCRwqYl9/xjyHqh5T1RPO64+AiLw3yk3J7Th0invnpNAksipv3NmdKuFhbodkjAlw/iSNZCBWRFqKSGV8N7aT8vVJAkY4rwcDS1VVnfahTnVVSyAWWOHnmOcQkUbOfRJEpIcT+yF/TtL8XMaps4yatYJcVWaMjKdODSutNcYUr9jqKVXNFpEJwBIgDJihqhtE5CnAq6pJQAIwV0TSgMP4kgBOvwXARiAbGK+qOeArrc0/ptM+EXgYaASsFZGPVPUefMnoPhHJBk4DQ53EZEooKzuX++alsOPwKd4afSkXR9V0OyRjTAUhwfxz1+PxqNfrdTuMgKKq/N8/1rLAu4sXbu3MLd2j3Q7JGBNgRCRFVT0FbatIN8LNBfDaF1tY4N3FxOtaW8IwxpSYJY0Q8uHavTy/OJWBnZvwwPVtit/BGGPysaQRIlbuOMKDC1bjaVGH5wd3stJaY0ypWNIIATsPn2LMHC8Na/lKa6tGWGmtMaZ0bLnXIJdx+ix3z0omKzuXxDHx1C
ujdYONMaHBkkYQO5uTy/h5K9l28CRzRvegdQMrrTXGnB9LGkFKVXn8/fV8lXaQvw7uRK9W9sfzxpjzZ/c0gtS0ZVt5Z8VOxl/bils9zYrfwRhj/GBJIwgtXr+XZxdv5sZOjXno+rZuh2OMCSKWNILMmp1HuX/+aro0q83fbu1MpUpWWmuMuXAsaQSRXUdOMXq2l6iLqvDmXR4rrTXGXHB2IzxIHDtzltGzvGRm55A45lLqW2mtMaYM2JVGEMjOyWXC26vYkn6C1+/oTusGF7kdkjEmSNmVRgWnqjyRtIFl36fz3C0duby1ldYaY8qOXWlUcAlfbWPe8h2Mu7oVQ+Kbux2OMSbIWdKowJZs2MefPtrEgI6NeLivldYaY8qeX0lDRPqJSKqIpInIpAK2VxGR+c725SISk2fbI057qoj0LW5MEZngtGneNcDFZ4qzba2IdCvtSQeDdbsyuD9xNZ2ia/P327pYaa0xplwUmzREJAyYCvQH4oBhIhKXr9to4IiqtgYmA885+8bhW/q1PdAPeFVEwooZ82ugD/BjvmP0x7fGeCwwBnitZKcaPPYcPc3o2cnUrVGZ6VZaa4wpR/5cafQA0lR1q6pmAYnAoHx9BgGzndcLgd7iW7BhEJCoqpmqug1Ic8YrdExVXaWq2wuIYxAwR32+A2qLSOOSnGwwOJGZzd2zkjmdlcPMUfFEXWSltcaY8uNP0mgK7MzzfpfTVmAfVc0GMoB6Rezrz5iliQMRGSMiXhHxpqenFzNkxeIrrV3JDwdO8Ood3WjT0EprjTHlK+huhKvqNFX1qKonKirK7XAuGFXlqQ828nlqOk8P6sCVscFzbsaYisOfpLEbyPuY1GinrcA+IhIORAKHitjXnzFLE0fQmvn1duZ8+yNjrrqY4Zdaaa0xxh3+JI1kIFZEWopIZXw3tpPy9UkCRjivBwNLVVWd9qFOdVVLfDexV/g5Zn5JwF1OFdVlQIaq7vUj/grv0437efrDjfRt35BJ/dq5HY4xJoQV+xfhqpotIhOAJUAYMENVN4jIU4BXVZOABGCuiKQBh/ElAZx+C4CNQDYwXlVzwFdam39Mp30i8DDQCFgrIh+p6j3AR8AAfDfTTwGjLtSHEMjW785gYuIqOjaN5MUhXa201hjjKvFdEAQnj8ejXq/X7TBKbV/GGQZN/YowEd4bfzkNalV1OyRjTAgQkRRV9RS0zZ49FaBOOqW1JzNzWHhfT0sYxpiAYEkjAOXkKhPfWUXq/uMkjPDQrlEtt0MyxhggCEtug8HTH2zkP5sP8MeB7bmmbQO3wzHGmP+ypBFgZn+znVnfbGf0FS2587IWbodjjDHnsKQRQJZu3s+T/9pAn0sa8uiAS9wOxxhjfsaSRoDYuOcYv3l7FXFNajFlWBfCrLTWGBOALGkEgP3HzjB6djK1qkWQMCKe6pWtPsEYE5gsabjsVFY2o2cnc+z0WRJGxNPQSmuNMQHMfqV1ka+0djUb9xwjYUQ8cU2stNYYE9jsSsNFf/5oE59u2s8Tv2zPte2stNYYE/gsabhk7nc/kvDVNkb2imFErxi3wzHGGL9Y0nDB56kH+GPSBnq3a8Afbsy/cq4xxgQuSxrlbPO+Y0x4exVtG17ElGFdrbTWGFOhWNIoRweOneHumcnUqBJGwkgPNapYHYIxpmKxn1rl5FRWNvfM8XL09FkWjO1J48hqbodkjDElZlca5SA3V3lg/mrW785gytCudGga6XZIxhhTKn4lDRHpJyKpIpImIpMK2F5FROY725eLSEyebY847aki0re4MZ0lYJc77fOd5WARkZEiki4iq51/95zPiZenZxdvZsmG/Tx2Qxx94hq6HY4xxpRasUlDRMKAqUB/IA4YJiL5S35GA0dUtTUwGXjO2TcO39Kv7YF+wKsiElbMmM8Bk52xjjhj/2S+qnZx/k0v1RmXs7eX72Dasq3c1bMFoy6PcTscY4w5L/5cafQA0lR1q6pmAYnAoHx9BgGznd
cLgd4iIk57oqpmquo2fOt79yhsTGef65wxcMa8qfSn565l36fzh/fXc03bKB6/MQ7f6RljTMXlT9JoCuzM836X01ZgH1XNBjKAekXsW1h7PeCoM0ZBx7pFRNaKyEIRaeZH7K5J3Xec8fNWEtugJq8M70Z4mN0+MsZUfBXpJ9m/gBhV7QR8wv+ubM4hImNExCsi3vT09HIN8CfpxzO5e1Yy1SqHMWNkPDWttNYYEyT8SRq7gby/1Uc7bQX2EZFwIBI4VMS+hbUfAmo7Y5xzLFU9pKqZTvt0oHtBwarqNFX1qKonKirKj9O7sE5n5XDPHC+HT2aRMCKeJrWttNYYEzz8SRrJQKxT1VQZ343tpHx9koARzuvBwFJVVad9qFNd1RKIBVYUNqazz2fOGDhjvg8gIo3zHG8gsKlkp1r2cnOVBxesZu2uo7w0tAsdo6201hgTXIqdN1HVbBGZACwBwoAZqrpBRJ4CvKqaBCQAc0UkDTiMLwng9FsAbASygfGqmgNQ0JjOIf8PSBSRZ4BVztgAE0VkoDPOYWDkeZ/9Bfb8klQ+Xr+Px264hF+0b+R2OMYYc8GJ75f74OTxeNTr9ZbLsRJX7GDSonXcfmlznrmpg1VKGWMqLBFJUVVPQdsq0o3wgPV12kEee289V7WJ4smB7S1hGGOCliWN8/TD/uOMeyuFVlE1mTq8q5XWGmOCmv2EOw8HT2QyalYyVcJ9T629qGqE2yEZY0yZsqRRSmfO5nDvHC8HT2SSMMJDdJ3qbodkjDFlzv7qrBRyc5WH3l3D6p1Hee32bnRuVtvtkIwxplzYlUYpvPBJKh+u3cukfu3o16Fx8TsYY0yQsKRRQgu8O5n62RaG9WjGmKsudjscY4wpV5Y0SuCbLQd5dNE6roytz1OD7G8xjDGhx5KGn9IOnGDc3BRa1q/B1Nu7EWGltcaYEGQ/+fxw6ITvqbWVwysxY2Q8tay01hgToqx6qhhnzuYwZm4K+4+dIXHMZTSra6W1xpjQZUmjCKrKwwvXkvLjEV69vRtdm9dxOyRjjHGVTU8VYfIn35O0Zg8P92vLgI5WWmuMMZY0CvGPlF1MWZrGEE8z7ru6ldvhGGNMQLCkUYDlWw8xadFaerWqx9P2mHNjjPkvu6dRgMjqEfRsVZ+Xh3alcrjlVWOM+YkljQK0a1SLOXf3cDsMY4wJOH79Gi0i/UQkVUTSRGRSAduriMh8Z/tyEYnJs+0Rpz1VRPoWN6azbvhyp32+s4Z4kccwxhhTPopNGiISBkwF+gNxwDARicvXbTRwRFVbA5OB55x94/CtF94e6Ae8KiJhxYz5HDDZGeuIM3ahxzDGGFN+/LnS6AGkqepWVc0CEoFB+foMAmY7rxcCvcV393gQkKiqmaq6DUhzxitwTGef65wxcMa8qZhjGGOMKSf+JI2mwM4873c5bQX2UdVsIAOoV8S+hbXXA446Y+Q/VmHHOIeIjBERr4h409PT/Tg9Y4wx/gq60iBVnaaqHlX1REVFuR2OMcYEFX+Sxm6gWZ730U5bgX1EJByIBA4VsW9h7YeA2s4Y+Y9V2DGMMcaUE3+SRjIQ61Q1VcZ3YzspX58kYITzejCwVFXVaR/qVD61BGKBFYWN6ezzmTMGzpjvF3MMY4wx5aTYv9NQ1WwRmQAsAcKAGaq6QUSeAryqmgQkAHNFJA04jC8J4PRbAGwEsoHxqpoDUNCYziH/D0gUkWeAVc7YFHYMY4wx5UeC+Zd1EUkHfizl7vWBgxcwnIrOPo9z2efxP/ZZnCsYPo8WqlrgTeGgThrnQ0S8qupxO45AYZ/Huezz+B/7LM4V7J9H0FVPGWOMKTuWNIwxxvjNkkbhprkdQICxz+Nc9nn8j30W5wrqz8PuaRhjjPGbXWkYY4zxmyUNY4wxfrOkUYDi1g8JJSLSTEQ+E5GNIrJBRH7rdkxucx7vv0pEPnA7FreJSG0RWSgim0Vkk4j0dDsmt4jIA873yHoReUdEqrodU1mwpJ
GPn+uHhJJs4CFVjQMuA8aH+OcB8Ftgk9tBBIiXgMWq2g7oTIh+LiLSFJgIeFS1A74nXQTlUyssafycP+uHhAxV3auqK53Xx/H9UMj/aPyQISLRwA3AdLdjcZuIRAJX4TzqR1WzVPWou1G5Khyo5jxQtTqwx+V4yoQljZ/zZ/2QkOQssdsVWO5uJK56EXgYyHU7kADQEkgHZjrTddNFpIbbQblBVXcDfwN2AHuBDFX9t7tRlQ1LGsYvIlIT+Adwv6oeczseN4jIjcABVU1xO5YAEQ50A15T1a7ASSAk7wGKSB18MxItgSZADRG5w92oyoYljZ/zZ/2QkCIiEfgSxjxVXeR2PC66HBgoItvxTVteJyJvuRuSq3YBu1T1pyvPhfiSSCjqA2xT1XRVPQssAnq5HFOZsKTxc/6sHxIynHXYE4BNqvp3t+Nxk6o+oqrRqhqD7+tiqaoG5W+T/lDVfcBOEWnrNPXGtwxCKNoBXCYi1Z3vmd4EaVFAsetphJrC1g9xOSw3XQ7cCawTkdVO26Oq+pGLMZnA8RtgnvML1lZglMvxuEJVl4vIQmAlvorDVQTp40TsMSLGGGP8ZtNTxhhj/GZJwxhjjN8saRhjjPGbJQ1jjDF+s6RhjDHGb5Y0jDHG+M2ShjHGGL/9f9x1R23+rYptAAAAAElFTkSuQmCC\n",
            "text/plain": [
              "<Figure size 432x288 with 1 Axes>"
            ]
          },
          "metadata": {
            "tags": [],
            "needs_background": "light"
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WOv6rFsbR9z2",
        "colab_type": "code",
        "outputId": "1d319a50-949f-4c40-a5fe-d50f95693b5e",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Train model with adam\n",
        "wandb.init(project=\"training-bn-only\", id=\"resnet-adam\")\n",
        "\n",
        "with strategy.scope():\n",
        "    model = get_training_model(\"adam\")\n",
        "    \n",
        "start = time.time()\n",
        "h = model.fit(train_ds,\n",
        "         validation_data=test_ds,\n",
        "         epochs=75,\n",
        "         callbacks=[WandbCallback()])\n",
        "end = time.time()\n",
        "wandb.log({\"training_time\": end - start})\n",
        "print(\"Network takes {:.3f} seconds to train\".format(end - start))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "\n",
              "                Logging results to <a href=\"https://wandb.com\" target=\"_blank\">Weights & Biases</a> <a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">(Documentation)</a>.<br/>\n",
              "                Project page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only</a><br/>\n",
              "                Run page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only/runs/resnet-ramups-adam\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only/runs/resnet-ramups-adam</a><br/>\n",
              "            "
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        },
        {
          "output_type": "stream",
          "text": [
            "Epoch 1/75\n",
            "49/49 [==============================] - 12s 250ms/step - accuracy: 0.3086 - loss: 1.8823 - val_accuracy: 0.1391 - val_loss: 2.4941\n",
            "Epoch 2/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.4289 - loss: 1.5597 - val_accuracy: 0.2024 - val_loss: 2.4139\n",
            "Epoch 3/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.4995 - loss: 1.3862 - val_accuracy: 0.2564 - val_loss: 2.1664\n",
            "Epoch 4/75\n",
            "49/49 [==============================] - 5s 107ms/step - accuracy: 0.5483 - loss: 1.2612 - val_accuracy: 0.3827 - val_loss: 1.7452\n",
            "Epoch 5/75\n",
            "49/49 [==============================] - 7s 147ms/step - accuracy: 0.5870 - loss: 1.1616 - val_accuracy: 0.3760 - val_loss: 1.8512\n",
            "Epoch 6/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.6149 - loss: 1.0844 - val_accuracy: 0.4477 - val_loss: 1.6076\n",
            "Epoch 7/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.6372 - loss: 1.0229 - val_accuracy: 0.5343 - val_loss: 1.3327\n",
            "Epoch 8/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.6537 - loss: 0.9723 - val_accuracy: 0.5692 - val_loss: 1.2722\n",
            "Epoch 9/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.6747 - loss: 0.9258 - val_accuracy: 0.6089 - val_loss: 1.1194\n",
            "Epoch 10/75\n",
            "49/49 [==============================] - 5s 112ms/step - accuracy: 0.6877 - loss: 0.8861 - val_accuracy: 0.6385 - val_loss: 1.0540\n",
            "Epoch 11/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.7003 - loss: 0.8523 - val_accuracy: 0.6291 - val_loss: 1.1420\n",
            "Epoch 12/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.7163 - loss: 0.8135 - val_accuracy: 0.6600 - val_loss: 0.9991\n",
            "Epoch 13/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.7229 - loss: 0.7876 - val_accuracy: 0.6997 - val_loss: 0.8767\n",
            "Epoch 14/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.7338 - loss: 0.7604 - val_accuracy: 0.7383 - val_loss: 0.7593\n",
            "Epoch 15/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.7467 - loss: 0.7225 - val_accuracy: 0.7414 - val_loss: 0.7360\n",
            "Epoch 16/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.7546 - loss: 0.7012 - val_accuracy: 0.7166 - val_loss: 0.8318\n",
            "Epoch 17/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.7618 - loss: 0.6821 - val_accuracy: 0.7042 - val_loss: 0.9334\n",
            "Epoch 18/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.7698 - loss: 0.6599 - val_accuracy: 0.6143 - val_loss: 1.2749\n",
            "Epoch 19/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.7791 - loss: 0.6339 - val_accuracy: 0.7277 - val_loss: 0.8030\n",
            "Epoch 20/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.7844 - loss: 0.6185 - val_accuracy: 0.7509 - val_loss: 0.7282\n",
            "Epoch 21/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.7918 - loss: 0.5976 - val_accuracy: 0.7399 - val_loss: 0.7889\n",
            "Epoch 22/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.7961 - loss: 0.5836 - val_accuracy: 0.7469 - val_loss: 0.7778\n",
            "Epoch 23/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.8028 - loss: 0.5680 - val_accuracy: 0.7622 - val_loss: 0.7159\n",
            "Epoch 24/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8081 - loss: 0.5559 - val_accuracy: 0.7486 - val_loss: 0.7901\n",
            "Epoch 25/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8135 - loss: 0.5414 - val_accuracy: 0.7514 - val_loss: 0.7966\n",
            "Epoch 26/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8200 - loss: 0.5242 - val_accuracy: 0.7680 - val_loss: 0.7362\n",
            "Epoch 27/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.8211 - loss: 0.5156 - val_accuracy: 0.7912 - val_loss: 0.6527\n",
            "Epoch 28/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8242 - loss: 0.5056 - val_accuracy: 0.7801 - val_loss: 0.7003\n",
            "Epoch 29/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.8317 - loss: 0.4851 - val_accuracy: 0.8086 - val_loss: 0.5798\n",
            "Epoch 30/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8356 - loss: 0.4765 - val_accuracy: 0.7995 - val_loss: 0.6182\n",
            "Epoch 31/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8383 - loss: 0.4645 - val_accuracy: 0.8032 - val_loss: 0.6073\n",
            "Epoch 32/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8417 - loss: 0.4560 - val_accuracy: 0.7868 - val_loss: 0.7058\n",
            "Epoch 33/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8448 - loss: 0.4450 - val_accuracy: 0.7810 - val_loss: 0.7143\n",
            "Epoch 34/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8496 - loss: 0.4362 - val_accuracy: 0.8088 - val_loss: 0.6057\n",
            "Epoch 35/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8516 - loss: 0.4254 - val_accuracy: 0.7762 - val_loss: 0.7130\n",
            "Epoch 36/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8502 - loss: 0.4307 - val_accuracy: 0.8018 - val_loss: 0.6292\n",
            "Epoch 37/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8580 - loss: 0.4090 - val_accuracy: 0.8094 - val_loss: 0.5886\n",
            "Epoch 38/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8590 - loss: 0.4081 - val_accuracy: 0.8024 - val_loss: 0.6338\n",
            "Epoch 39/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8619 - loss: 0.3987 - val_accuracy: 0.8056 - val_loss: 0.6232\n",
            "Epoch 40/75\n",
            "49/49 [==============================] - 4s 87ms/step - accuracy: 0.8639 - loss: 0.3912 - val_accuracy: 0.8162 - val_loss: 0.6053\n",
            "Epoch 41/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8671 - loss: 0.3832 - val_accuracy: 0.7716 - val_loss: 0.8025\n",
            "Epoch 42/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8683 - loss: 0.3763 - val_accuracy: 0.7943 - val_loss: 0.6929\n",
            "Epoch 43/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8688 - loss: 0.3706 - val_accuracy: 0.7990 - val_loss: 0.6935\n",
            "Epoch 44/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8710 - loss: 0.3656 - val_accuracy: 0.8101 - val_loss: 0.6356\n",
            "Epoch 45/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8742 - loss: 0.3601 - val_accuracy: 0.8157 - val_loss: 0.6120\n",
            "Epoch 46/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8783 - loss: 0.3489 - val_accuracy: 0.8113 - val_loss: 0.6178\n",
            "Epoch 47/75\n",
            "49/49 [==============================] - 8s 161ms/step - accuracy: 0.8808 - loss: 0.3434 - val_accuracy: 0.7725 - val_loss: 0.8290\n",
            "Epoch 48/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8819 - loss: 0.3384 - val_accuracy: 0.7863 - val_loss: 0.7350\n",
            "Epoch 49/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8831 - loss: 0.3346 - val_accuracy: 0.8162 - val_loss: 0.6388\n",
            "Epoch 50/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8881 - loss: 0.3241 - val_accuracy: 0.8021 - val_loss: 0.6776\n",
            "Epoch 51/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.8864 - loss: 0.3206 - val_accuracy: 0.8133 - val_loss: 0.6177\n",
            "Epoch 52/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8902 - loss: 0.3151 - val_accuracy: 0.8102 - val_loss: 0.6530\n",
            "Epoch 53/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8922 - loss: 0.3048 - val_accuracy: 0.8207 - val_loss: 0.6090\n",
            "Epoch 54/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8919 - loss: 0.3087 - val_accuracy: 0.7969 - val_loss: 0.6728\n",
            "Epoch 55/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8940 - loss: 0.3001 - val_accuracy: 0.7870 - val_loss: 0.7686\n",
            "Epoch 56/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8951 - loss: 0.2937 - val_accuracy: 0.8202 - val_loss: 0.6452\n",
            "Epoch 57/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.8990 - loss: 0.2877 - val_accuracy: 0.8040 - val_loss: 0.6950\n",
            "Epoch 58/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.8971 - loss: 0.2882 - val_accuracy: 0.8300 - val_loss: 0.5900\n",
            "Epoch 59/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.9004 - loss: 0.2864 - val_accuracy: 0.8250 - val_loss: 0.5942\n",
            "Epoch 60/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.9036 - loss: 0.2750 - val_accuracy: 0.8136 - val_loss: 0.6708\n",
            "Epoch 61/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.9044 - loss: 0.2674 - val_accuracy: 0.8060 - val_loss: 0.6946\n",
            "Epoch 62/75\n",
            "49/49 [==============================] - 4s 91ms/step - accuracy: 0.9070 - loss: 0.2637 - val_accuracy: 0.7973 - val_loss: 0.7207\n",
            "Epoch 63/75\n",
            "49/49 [==============================] - 4s 91ms/step - accuracy: 0.9073 - loss: 0.2600 - val_accuracy: 0.8280 - val_loss: 0.5997\n",
            "Epoch 64/75\n",
            "49/49 [==============================] - 5s 92ms/step - accuracy: 0.9083 - loss: 0.2610 - val_accuracy: 0.8251 - val_loss: 0.5986\n",
            "Epoch 65/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.9121 - loss: 0.2539 - val_accuracy: 0.8240 - val_loss: 0.5925\n",
            "Epoch 66/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.9107 - loss: 0.2543 - val_accuracy: 0.7554 - val_loss: 1.0014\n",
            "Epoch 67/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.9120 - loss: 0.2481 - val_accuracy: 0.8127 - val_loss: 0.7072\n",
            "Epoch 68/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.9134 - loss: 0.2434 - val_accuracy: 0.8107 - val_loss: 0.6879\n",
            "Epoch 69/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.9172 - loss: 0.2381 - val_accuracy: 0.8238 - val_loss: 0.6552\n",
            "Epoch 70/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.9184 - loss: 0.2346 - val_accuracy: 0.8177 - val_loss: 0.6796\n",
            "Epoch 71/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.9178 - loss: 0.2339 - val_accuracy: 0.8326 - val_loss: 0.6069\n",
            "Epoch 72/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.9206 - loss: 0.2279 - val_accuracy: 0.8214 - val_loss: 0.6747\n",
            "Epoch 73/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.9198 - loss: 0.2299 - val_accuracy: 0.7988 - val_loss: 0.8151\n",
            "Epoch 74/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.9207 - loss: 0.2231 - val_accuracy: 0.8340 - val_loss: 0.5856\n",
            "Epoch 75/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.9227 - loss: 0.2211 - val_accuracy: 0.8217 - val_loss: 0.6991\n",
            "Network takes 386.764 seconds to train\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TC-lTSzltO1I",
        "colab_type": "code",
        "outputId": "a43ea341-b0bd-427b-92d7-ca76bc64949d",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Train model with adam and the LR rampup schedule (lr_callback)\n",
        "wandb.init(project=\"training-bn-only\", id=\"resnet-adam-rampup\")\n",
        "\n",
        "with strategy.scope():\n",
        "    model = get_training_model(\"adam\")\n",
        "    \n",
        "start = time.time()\n",
        "h = model.fit(train_ds,\n",
        "         validation_data=test_ds,\n",
        "         epochs=75,\n",
        "         callbacks=[WandbCallback(), lr_callback])\n",
        "end = time.time()\n",
        "wandb.log({\"training_time\": end - start})\n",
        "print(\"Network takes {:.3f} seconds to train\".format(end - start))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "\n",
              "                Logging results to <a href=\"https://wandb.com\" target=\"_blank\">Weights & Biases</a> <a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">(Documentation)</a>.<br/>\n",
              "                Project page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only</a><br/>\n",
              "                Run page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only/runs/resnet-adam-rampup\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only/runs/resnet-adam-rampup</a><br/>\n",
              "            "
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        },
        {
          "output_type": "stream",
          "text": [
            "\n",
            "Epoch 00001: LearningRateScheduler reducing learning rate to 1e-05.\n",
            "Epoch 1/75\n",
            "49/49 [==============================] - 12s 249ms/step - accuracy: 0.1112 - loss: 2.4351 - val_accuracy: 0.1164 - val_loss: 2.3487 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00002: LearningRateScheduler reducing learning rate to 8.8e-05.\n",
            "Epoch 2/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.1759 - loss: 2.2261 - val_accuracy: 0.1209 - val_loss: 2.2865 - lr: 8.8000e-05\n",
            "\n",
            "Epoch 00003: LearningRateScheduler reducing learning rate to 0.000166.\n",
            "Epoch 3/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.2772 - loss: 1.9760 - val_accuracy: 0.1536 - val_loss: 2.3793 - lr: 1.6600e-04\n",
            "\n",
            "Epoch 00004: LearningRateScheduler reducing learning rate to 0.000244.\n",
            "Epoch 4/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.3376 - loss: 1.7987 - val_accuracy: 0.1986 - val_loss: 2.3158 - lr: 2.4400e-04\n",
            "\n",
            "Epoch 00005: LearningRateScheduler reducing learning rate to 0.000322.\n",
            "Epoch 5/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.3892 - loss: 1.6624 - val_accuracy: 0.2756 - val_loss: 2.0201 - lr: 3.2200e-04\n",
            "\n",
            "Epoch 00006: LearningRateScheduler reducing learning rate to 0.0004.\n",
            "Epoch 6/75\n",
            "49/49 [==============================] - 6s 118ms/step - accuracy: 0.4315 - loss: 1.5576 - val_accuracy: 0.3337 - val_loss: 1.8677 - lr: 4.0000e-04\n",
            "\n",
            "Epoch 00007: LearningRateScheduler reducing learning rate to 0.000322.\n",
            "Epoch 7/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.4656 - loss: 1.4752 - val_accuracy: 0.3594 - val_loss: 1.8492 - lr: 3.2200e-04\n",
            "\n",
            "Epoch 00008: LearningRateScheduler reducing learning rate to 0.0002596000000000001.\n",
            "Epoch 8/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.4901 - loss: 1.4189 - val_accuracy: 0.4364 - val_loss: 1.5661 - lr: 2.5960e-04\n",
            "\n",
            "Epoch 00009: LearningRateScheduler reducing learning rate to 0.00020968000000000004.\n",
            "Epoch 9/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5044 - loss: 1.3713 - val_accuracy: 0.4881 - val_loss: 1.4076 - lr: 2.0968e-04\n",
            "\n",
            "Epoch 00010: LearningRateScheduler reducing learning rate to 0.00016974400000000002.\n",
            "Epoch 10/75\n",
            "49/49 [==============================] - 5s 112ms/step - accuracy: 0.5159 - loss: 1.3443 - val_accuracy: 0.5036 - val_loss: 1.3598 - lr: 1.6974e-04\n",
            "\n",
            "Epoch 00011: LearningRateScheduler reducing learning rate to 0.00013779520000000003.\n",
            "Epoch 11/75\n",
            "49/49 [==============================] - 5s 112ms/step - accuracy: 0.5244 - loss: 1.3215 - val_accuracy: 0.5214 - val_loss: 1.3159 - lr: 1.3780e-04\n",
            "\n",
            "Epoch 00012: LearningRateScheduler reducing learning rate to 0.00011223616000000004.\n",
            "Epoch 12/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5352 - loss: 1.2994 - val_accuracy: 0.5356 - val_loss: 1.2858 - lr: 1.1224e-04\n",
            "\n",
            "Epoch 00013: LearningRateScheduler reducing learning rate to 9.178892800000003e-05.\n",
            "Epoch 13/75\n",
            "49/49 [==============================] - 5s 112ms/step - accuracy: 0.5399 - loss: 1.2850 - val_accuracy: 0.5521 - val_loss: 1.2349 - lr: 9.1789e-05\n",
            "\n",
            "Epoch 00014: LearningRateScheduler reducing learning rate to 7.543114240000003e-05.\n",
            "Epoch 14/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5449 - loss: 1.2707 - val_accuracy: 0.5674 - val_loss: 1.2016 - lr: 7.5431e-05\n",
            "\n",
            "Epoch 00015: LearningRateScheduler reducing learning rate to 6.234491392000002e-05.\n",
            "Epoch 15/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5505 - loss: 1.2600 - val_accuracy: 0.5714 - val_loss: 1.1830 - lr: 6.2345e-05\n",
            "\n",
            "Epoch 00016: LearningRateScheduler reducing learning rate to 5.1875931136000024e-05.\n",
            "Epoch 16/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5517 - loss: 1.2528 - val_accuracy: 0.5720 - val_loss: 1.1798 - lr: 5.1876e-05\n",
            "\n",
            "Epoch 00017: LearningRateScheduler reducing learning rate to 4.3500744908800015e-05.\n",
            "Epoch 17/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5546 - loss: 1.2458 - val_accuracy: 0.5782 - val_loss: 1.1655 - lr: 4.3501e-05\n",
            "\n",
            "Epoch 00018: LearningRateScheduler reducing learning rate to 3.6800595927040014e-05.\n",
            "Epoch 18/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5571 - loss: 1.2383 - val_accuracy: 0.5816 - val_loss: 1.1584 - lr: 3.6801e-05\n",
            "\n",
            "Epoch 00019: LearningRateScheduler reducing learning rate to 3.1440476741632015e-05.\n",
            "Epoch 19/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5589 - loss: 1.2361 - val_accuracy: 0.5855 - val_loss: 1.1502 - lr: 3.1440e-05\n",
            "\n",
            "Epoch 00020: LearningRateScheduler reducing learning rate to 2.7152381393305616e-05.\n",
            "Epoch 20/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5630 - loss: 1.2286 - val_accuracy: 0.5921 - val_loss: 1.1423 - lr: 2.7152e-05\n",
            "\n",
            "Epoch 00021: LearningRateScheduler reducing learning rate to 2.3721905114644494e-05.\n",
            "Epoch 21/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.5612 - loss: 1.2271 - val_accuracy: 0.5895 - val_loss: 1.1425 - lr: 2.3722e-05\n",
            "\n",
            "Epoch 00022: LearningRateScheduler reducing learning rate to 2.0977524091715595e-05.\n",
            "Epoch 22/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5636 - loss: 1.2250 - val_accuracy: 0.5913 - val_loss: 1.1378 - lr: 2.0978e-05\n",
            "\n",
            "Epoch 00023: LearningRateScheduler reducing learning rate to 1.8782019273372477e-05.\n",
            "Epoch 23/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5651 - loss: 1.2199 - val_accuracy: 0.5932 - val_loss: 1.1370 - lr: 1.8782e-05\n",
            "\n",
            "Epoch 00024: LearningRateScheduler reducing learning rate to 1.702561541869798e-05.\n",
            "Epoch 24/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5653 - loss: 1.2189 - val_accuracy: 0.5924 - val_loss: 1.1321 - lr: 1.7026e-05\n",
            "\n",
            "Epoch 00025: LearningRateScheduler reducing learning rate to 1.5620492334958385e-05.\n",
            "Epoch 25/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5660 - loss: 1.2155 - val_accuracy: 0.5937 - val_loss: 1.1284 - lr: 1.5620e-05\n",
            "\n",
            "Epoch 00026: LearningRateScheduler reducing learning rate to 1.4496393867966709e-05.\n",
            "Epoch 26/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5669 - loss: 1.2155 - val_accuracy: 0.5926 - val_loss: 1.1303 - lr: 1.4496e-05\n",
            "\n",
            "Epoch 00027: LearningRateScheduler reducing learning rate to 1.3597115094373368e-05.\n",
            "Epoch 27/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5678 - loss: 1.2121 - val_accuracy: 0.5954 - val_loss: 1.1276 - lr: 1.3597e-05\n",
            "\n",
            "Epoch 00028: LearningRateScheduler reducing learning rate to 1.2877692075498695e-05.\n",
            "Epoch 28/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5670 - loss: 1.2100 - val_accuracy: 0.5949 - val_loss: 1.1276 - lr: 1.2878e-05\n",
            "\n",
            "Epoch 00029: LearningRateScheduler reducing learning rate to 1.2302153660398955e-05.\n",
            "Epoch 29/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5678 - loss: 1.2131 - val_accuracy: 0.5961 - val_loss: 1.1226 - lr: 1.2302e-05\n",
            "\n",
            "Epoch 00030: LearningRateScheduler reducing learning rate to 1.1841722928319164e-05.\n",
            "Epoch 30/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5695 - loss: 1.2065 - val_accuracy: 0.5957 - val_loss: 1.1214 - lr: 1.1842e-05\n",
            "\n",
            "Epoch 00031: LearningRateScheduler reducing learning rate to 1.1473378342655331e-05.\n",
            "Epoch 31/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5705 - loss: 1.2041 - val_accuracy: 0.5963 - val_loss: 1.1184 - lr: 1.1473e-05\n",
            "\n",
            "Epoch 00032: LearningRateScheduler reducing learning rate to 1.1178702674124267e-05.\n",
            "Epoch 32/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5721 - loss: 1.2044 - val_accuracy: 0.5983 - val_loss: 1.1195 - lr: 1.1179e-05\n",
            "\n",
            "Epoch 00033: LearningRateScheduler reducing learning rate to 1.0942962139299413e-05.\n",
            "Epoch 33/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5701 - loss: 1.2031 - val_accuracy: 0.5975 - val_loss: 1.1208 - lr: 1.0943e-05\n",
            "\n",
            "Epoch 00034: LearningRateScheduler reducing learning rate to 1.075436971143953e-05.\n",
            "Epoch 34/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5717 - loss: 1.1999 - val_accuracy: 0.5984 - val_loss: 1.1173 - lr: 1.0754e-05\n",
            "\n",
            "Epoch 00035: LearningRateScheduler reducing learning rate to 1.0603495769151624e-05.\n",
            "Epoch 35/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5716 - loss: 1.1993 - val_accuracy: 0.5975 - val_loss: 1.1163 - lr: 1.0603e-05\n",
            "\n",
            "Epoch 00036: LearningRateScheduler reducing learning rate to 1.04827966153213e-05.\n",
            "Epoch 36/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5722 - loss: 1.1990 - val_accuracy: 0.5996 - val_loss: 1.1148 - lr: 1.0483e-05\n",
            "\n",
            "Epoch 00037: LearningRateScheduler reducing learning rate to 1.038623729225704e-05.\n",
            "Epoch 37/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5697 - loss: 1.2001 - val_accuracy: 0.5991 - val_loss: 1.1128 - lr: 1.0386e-05\n",
            "\n",
            "Epoch 00038: LearningRateScheduler reducing learning rate to 1.0308989833805632e-05.\n",
            "Epoch 38/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5732 - loss: 1.1964 - val_accuracy: 0.5983 - val_loss: 1.1139 - lr: 1.0309e-05\n",
            "\n",
            "Epoch 00039: LearningRateScheduler reducing learning rate to 1.0247191867044507e-05.\n",
            "Epoch 39/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5751 - loss: 1.1966 - val_accuracy: 0.5995 - val_loss: 1.1122 - lr: 1.0247e-05\n",
            "\n",
            "Epoch 00040: LearningRateScheduler reducing learning rate to 1.0197753493635605e-05.\n",
            "Epoch 40/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5750 - loss: 1.1930 - val_accuracy: 0.5997 - val_loss: 1.1096 - lr: 1.0198e-05\n",
            "\n",
            "Epoch 00041: LearningRateScheduler reducing learning rate to 1.0158202794908484e-05.\n",
            "Epoch 41/75\n",
            "49/49 [==============================] - 6s 113ms/step - accuracy: 0.5727 - loss: 1.1953 - val_accuracy: 0.5990 - val_loss: 1.1089 - lr: 1.0158e-05\n",
            "\n",
            "Epoch 00042: LearningRateScheduler reducing learning rate to 1.0126562235926787e-05.\n",
            "Epoch 42/75\n",
            "49/49 [==============================] - 6s 114ms/step - accuracy: 0.5751 - loss: 1.1930 - val_accuracy: 0.6009 - val_loss: 1.1064 - lr: 1.0127e-05\n",
            "\n",
            "Epoch 00043: LearningRateScheduler reducing learning rate to 1.010124978874143e-05.\n",
            "Epoch 43/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5738 - loss: 1.1935 - val_accuracy: 0.6024 - val_loss: 1.1052 - lr: 1.0101e-05\n",
            "\n",
            "Epoch 00044: LearningRateScheduler reducing learning rate to 1.0080999830993145e-05.\n",
            "Epoch 44/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.5766 - loss: 1.1910 - val_accuracy: 0.6029 - val_loss: 1.1052 - lr: 1.0081e-05\n",
            "\n",
            "Epoch 00045: LearningRateScheduler reducing learning rate to 1.0064799864794516e-05.\n",
            "Epoch 45/75\n",
            "49/49 [==============================] - 5s 112ms/step - accuracy: 0.5773 - loss: 1.1887 - val_accuracy: 0.6029 - val_loss: 1.1039 - lr: 1.0065e-05\n",
            "\n",
            "Epoch 00046: LearningRateScheduler reducing learning rate to 1.0051839891835612e-05.\n",
            "Epoch 46/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5791 - loss: 1.1862 - val_accuracy: 0.6026 - val_loss: 1.1024 - lr: 1.0052e-05\n",
            "\n",
            "Epoch 00047: LearningRateScheduler reducing learning rate to 1.004147191346849e-05.\n",
            "Epoch 47/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5779 - loss: 1.1899 - val_accuracy: 0.6039 - val_loss: 1.1022 - lr: 1.0041e-05\n",
            "\n",
            "Epoch 00048: LearningRateScheduler reducing learning rate to 1.0033177530774792e-05.\n",
            "Epoch 48/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5770 - loss: 1.1879 - val_accuracy: 0.6025 - val_loss: 1.1018 - lr: 1.0033e-05\n",
            "\n",
            "Epoch 00049: LearningRateScheduler reducing learning rate to 1.0026542024619834e-05.\n",
            "Epoch 49/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5758 - loss: 1.1864 - val_accuracy: 0.6052 - val_loss: 1.1019 - lr: 1.0027e-05\n",
            "\n",
            "Epoch 00050: LearningRateScheduler reducing learning rate to 1.0021233619695867e-05.\n",
            "Epoch 50/75\n",
            "49/49 [==============================] - 6s 114ms/step - accuracy: 0.5747 - loss: 1.1856 - val_accuracy: 0.6037 - val_loss: 1.1003 - lr: 1.0021e-05\n",
            "\n",
            "Epoch 00051: LearningRateScheduler reducing learning rate to 1.0016986895756694e-05.\n",
            "Epoch 51/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5794 - loss: 1.1822 - val_accuracy: 0.6047 - val_loss: 1.0999 - lr: 1.0017e-05\n",
            "\n",
            "Epoch 00052: LearningRateScheduler reducing learning rate to 1.0013589516605356e-05.\n",
            "Epoch 52/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5807 - loss: 1.1811 - val_accuracy: 0.6066 - val_loss: 1.0992 - lr: 1.0014e-05\n",
            "\n",
            "Epoch 00053: LearningRateScheduler reducing learning rate to 1.0010871613284285e-05.\n",
            "Epoch 53/75\n",
            "49/49 [==============================] - 10s 207ms/step - accuracy: 0.5790 - loss: 1.1808 - val_accuracy: 0.6066 - val_loss: 1.0981 - lr: 1.0011e-05\n",
            "\n",
            "Epoch 00054: LearningRateScheduler reducing learning rate to 1.0008697290627427e-05.\n",
            "Epoch 54/75\n",
            "49/49 [==============================] - 6s 112ms/step - accuracy: 0.5802 - loss: 1.1792 - val_accuracy: 0.6058 - val_loss: 1.0949 - lr: 1.0009e-05\n",
            "\n",
            "Epoch 00055: LearningRateScheduler reducing learning rate to 1.0006957832501943e-05.\n",
            "Epoch 55/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.5806 - loss: 1.1759 - val_accuracy: 0.6072 - val_loss: 1.0958 - lr: 1.0007e-05\n",
            "\n",
            "Epoch 00056: LearningRateScheduler reducing learning rate to 1.0005566266001554e-05.\n",
            "Epoch 56/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5790 - loss: 1.1795 - val_accuracy: 0.6055 - val_loss: 1.0944 - lr: 1.0006e-05\n",
            "\n",
            "Epoch 00057: LearningRateScheduler reducing learning rate to 1.0004453012801243e-05.\n",
            "Epoch 57/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5798 - loss: 1.1760 - val_accuracy: 0.6063 - val_loss: 1.0898 - lr: 1.0004e-05\n",
            "\n",
            "Epoch 00058: LearningRateScheduler reducing learning rate to 1.0003562410240995e-05.\n",
            "Epoch 58/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5839 - loss: 1.1711 - val_accuracy: 0.6078 - val_loss: 1.0901 - lr: 1.0004e-05\n",
            "\n",
            "Epoch 00059: LearningRateScheduler reducing learning rate to 1.0002849928192796e-05.\n",
            "Epoch 59/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5852 - loss: 1.1713 - val_accuracy: 0.6071 - val_loss: 1.0881 - lr: 1.0003e-05\n",
            "\n",
            "Epoch 00060: LearningRateScheduler reducing learning rate to 1.0002279942554237e-05.\n",
            "Epoch 60/75\n",
            "49/49 [==============================] - 4s 88ms/step - accuracy: 0.5813 - loss: 1.1729 - val_accuracy: 0.6072 - val_loss: 1.0895 - lr: 1.0002e-05\n",
            "\n",
            "Epoch 00061: LearningRateScheduler reducing learning rate to 1.000182395404339e-05.\n",
            "Epoch 61/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5820 - loss: 1.1711 - val_accuracy: 0.6086 - val_loss: 1.0874 - lr: 1.0002e-05\n",
            "\n",
            "Epoch 00062: LearningRateScheduler reducing learning rate to 1.0001459163234711e-05.\n",
            "Epoch 62/75\n",
            "49/49 [==============================] - 5s 107ms/step - accuracy: 0.5840 - loss: 1.1663 - val_accuracy: 0.6091 - val_loss: 1.0856 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00063: LearningRateScheduler reducing learning rate to 1.000116733058777e-05.\n",
            "Epoch 63/75\n",
            "49/49 [==============================] - 4s 90ms/step - accuracy: 0.5809 - loss: 1.1733 - val_accuracy: 0.6077 - val_loss: 1.0871 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00064: LearningRateScheduler reducing learning rate to 1.0000933864470216e-05.\n",
            "Epoch 64/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5831 - loss: 1.1679 - val_accuracy: 0.6090 - val_loss: 1.0864 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00065: LearningRateScheduler reducing learning rate to 1.0000747091576173e-05.\n",
            "Epoch 65/75\n",
            "49/49 [==============================] - 5s 108ms/step - accuracy: 0.5851 - loss: 1.1720 - val_accuracy: 0.6095 - val_loss: 1.0847 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00066: LearningRateScheduler reducing learning rate to 1.0000597673260939e-05.\n",
            "Epoch 66/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5849 - loss: 1.1682 - val_accuracy: 0.6090 - val_loss: 1.0832 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00067: LearningRateScheduler reducing learning rate to 1.0000478138608751e-05.\n",
            "Epoch 67/75\n",
            "49/49 [==============================] - 4s 91ms/step - accuracy: 0.5826 - loss: 1.1671 - val_accuracy: 0.6085 - val_loss: 1.0846 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00068: LearningRateScheduler reducing learning rate to 1.0000382510887001e-05.\n",
            "Epoch 68/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5858 - loss: 1.1622 - val_accuracy: 0.6086 - val_loss: 1.0822 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00069: LearningRateScheduler reducing learning rate to 1.00003060087096e-05.\n",
            "Epoch 69/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5864 - loss: 1.1625 - val_accuracy: 0.6105 - val_loss: 1.0810 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00070: LearningRateScheduler reducing learning rate to 1.000024480696768e-05.\n",
            "Epoch 70/75\n",
            "49/49 [==============================] - 5s 111ms/step - accuracy: 0.5856 - loss: 1.1640 - val_accuracy: 0.6105 - val_loss: 1.0796 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00071: LearningRateScheduler reducing learning rate to 1.0000195845574146e-05.\n",
            "Epoch 71/75\n",
            "49/49 [==============================] - 4s 89ms/step - accuracy: 0.5854 - loss: 1.1611 - val_accuracy: 0.6108 - val_loss: 1.0805 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00072: LearningRateScheduler reducing learning rate to 1.0000156676459317e-05.\n",
            "Epoch 72/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5886 - loss: 1.1600 - val_accuracy: 0.6109 - val_loss: 1.0793 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00073: LearningRateScheduler reducing learning rate to 1.0000125341167454e-05.\n",
            "Epoch 73/75\n",
            "49/49 [==============================] - 5s 110ms/step - accuracy: 0.5873 - loss: 1.1580 - val_accuracy: 0.6114 - val_loss: 1.0774 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00074: LearningRateScheduler reducing learning rate to 1.0000100272933963e-05.\n",
            "Epoch 74/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5852 - loss: 1.1620 - val_accuracy: 0.6108 - val_loss: 1.0763 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00075: LearningRateScheduler reducing learning rate to 1.000008021834717e-05.\n",
            "Epoch 75/75\n",
            "49/49 [==============================] - 5s 109ms/step - accuracy: 0.5883 - loss: 1.1619 - val_accuracy: 0.6115 - val_loss: 1.0735 - lr: 1.0000e-05\n",
            "Network takes 434.128 seconds to train\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Ou8GN5VhWrbd",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Build the model inside the TPU strategy scope so its variables are\n",
        "# created on the TPU, then freeze everything except the BatchNormalization\n",
        "# layers — only the BN gamma/beta parameters remain trainable\n",
        "# (the \"training BatchNorm and only BatchNorm\" experiment).\n",
        "with strategy.scope():\n",
        "    model = get_training_model(\"adam\")\n",
        "\n",
        "    # Freeze every non-BatchNorm layer that exposes a `trainable` flag.\n",
        "    for layer in model.layers:\n",
        "        if not isinstance(layer, tf.keras.layers.BatchNormalization):\n",
        "            if hasattr(layer, \"trainable\"):\n",
        "                layer.trainable=False\n",
        "    # Compile AFTER toggling `trainable` so the freeze takes effect.\n",
        "    model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
        "    \n",
        "# Summary should report only the BN parameters as trainable (see next cell).\n",
        "model.summary()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Q0pgz6fvajZV",
        "colab_type": "text"
      },
      "source": [
        "- Total params: 575,114\n",
        "- **Trainable params: 4,000**\n",
        "- Non-trainable params: 571,114"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "HpzMXab8Us7C",
        "colab_type": "code",
        "outputId": "ed69fe5b-cfdd-478f-ae7d-21d2bb51a4b5",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Log this run to Weights & Biases under a fixed run id so a re-run\n",
        "# resumes the same run page rather than creating a new one.\n",
        "wandb.init(project=\"training-bn-only\", id=\"bn-only-adam\")\n",
        "\n",
        "# Time the full 75-epoch training of the BN-only model. No LR scheduler\n",
        "# here — Adam runs at its default (constant) learning rate.\n",
        "# NOTE(review): assumes `train_ds`/`test_ds` were built earlier in the\n",
        "# notebook — confirm on a fresh-kernel run.\n",
        "start = time.time()\n",
        "h = model.fit(train_ds,\n",
        "         validation_data=test_ds,\n",
        "         epochs=75,\n",
        "         callbacks=[WandbCallback()])\n",
        "end = time.time()\n",
        "# Record wall-clock training time alongside the run's metrics.\n",
        "wandb.log({\"training_time\": end - start})\n",
        "print(\"Network takes {:.3f} seconds to train\".format(end - start))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "\n",
              "                Logging results to <a href=\"https://wandb.com\" target=\"_blank\">Weights & Biases</a> <a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">(Documentation)</a>.<br/>\n",
              "                Project page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only</a><br/>\n",
              "                Run page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only/runs/bn-only-adam\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only/runs/bn-only-adam</a><br/>\n",
              "            "
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        },
        {
          "output_type": "stream",
          "text": [
            "Epoch 1/75\n",
            "49/49 [==============================] - 10s 207ms/step - accuracy: 0.1013 - loss: 2.5196 - val_accuracy: 0.1343 - val_loss: 2.3357\n",
            "Epoch 2/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1183 - loss: 2.3591 - val_accuracy: 0.1043 - val_loss: 2.2880\n",
            "Epoch 3/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1521 - loss: 2.2691 - val_accuracy: 0.1215 - val_loss: 2.2841\n",
            "Epoch 4/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1839 - loss: 2.2053 - val_accuracy: 0.1331 - val_loss: 2.2559\n",
            "Epoch 5/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.2098 - loss: 2.1544 - val_accuracy: 0.1649 - val_loss: 2.1920\n",
            "Epoch 6/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2328 - loss: 2.1028 - val_accuracy: 0.1922 - val_loss: 2.1286\n",
            "Epoch 7/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.2413 - loss: 2.0627 - val_accuracy: 0.2072 - val_loss: 2.0762\n",
            "Epoch 8/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2528 - loss: 2.0283 - val_accuracy: 0.2307 - val_loss: 2.0285\n",
            "Epoch 9/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2597 - loss: 2.0045 - val_accuracy: 0.2513 - val_loss: 1.9886\n",
            "Epoch 10/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.2657 - loss: 1.9820 - val_accuracy: 0.2676 - val_loss: 1.9532\n",
            "Epoch 11/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.2689 - loss: 1.9676 - val_accuracy: 0.2756 - val_loss: 1.9272\n",
            "Epoch 12/75\n",
            "49/49 [==============================] - 5s 100ms/step - accuracy: 0.2764 - loss: 1.9515 - val_accuracy: 0.2805 - val_loss: 1.9052\n",
            "Epoch 13/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.2749 - loss: 1.9428 - val_accuracy: 0.2898 - val_loss: 1.8894\n",
            "Epoch 14/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.2808 - loss: 1.9268 - val_accuracy: 0.2973 - val_loss: 1.8697\n",
            "Epoch 15/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.2861 - loss: 1.9169 - val_accuracy: 0.3047 - val_loss: 1.8530\n",
            "Epoch 16/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2886 - loss: 1.9086 - val_accuracy: 0.3134 - val_loss: 1.8376\n",
            "Epoch 17/75\n",
            "49/49 [==============================] - 5s 100ms/step - accuracy: 0.2910 - loss: 1.8993 - val_accuracy: 0.3154 - val_loss: 1.8258\n",
            "Epoch 18/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.3016 - loss: 1.8811 - val_accuracy: 0.3207 - val_loss: 1.8143\n",
            "Epoch 19/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3026 - loss: 1.8753 - val_accuracy: 0.3268 - val_loss: 1.8067\n",
            "Epoch 20/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3018 - loss: 1.8673 - val_accuracy: 0.3294 - val_loss: 1.7935\n",
            "Epoch 21/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3085 - loss: 1.8587 - val_accuracy: 0.3360 - val_loss: 1.7815\n",
            "Epoch 22/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3116 - loss: 1.8494 - val_accuracy: 0.3367 - val_loss: 1.7735\n",
            "Epoch 23/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3132 - loss: 1.8395 - val_accuracy: 0.3404 - val_loss: 1.7663\n",
            "Epoch 24/75\n",
            "49/49 [==============================] - 5s 94ms/step - accuracy: 0.3208 - loss: 1.8315 - val_accuracy: 0.3420 - val_loss: 1.7608\n",
            "Epoch 25/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3232 - loss: 1.8248 - val_accuracy: 0.3476 - val_loss: 1.7493\n",
            "Epoch 26/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3268 - loss: 1.8143 - val_accuracy: 0.3515 - val_loss: 1.7412\n",
            "Epoch 27/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3306 - loss: 1.8081 - val_accuracy: 0.3554 - val_loss: 1.7332\n",
            "Epoch 28/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3284 - loss: 1.8015 - val_accuracy: 0.3594 - val_loss: 1.7275\n",
            "Epoch 29/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3301 - loss: 1.7972 - val_accuracy: 0.3599 - val_loss: 1.7221\n",
            "Epoch 30/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.3366 - loss: 1.7871 - val_accuracy: 0.3661 - val_loss: 1.7073\n",
            "Epoch 31/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3370 - loss: 1.7796 - val_accuracy: 0.3668 - val_loss: 1.7045\n",
            "Epoch 32/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.3423 - loss: 1.7768 - val_accuracy: 0.3677 - val_loss: 1.6944\n",
            "Epoch 33/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3425 - loss: 1.7674 - val_accuracy: 0.3698 - val_loss: 1.6924\n",
            "Epoch 34/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3457 - loss: 1.7641 - val_accuracy: 0.3736 - val_loss: 1.6794\n",
            "Epoch 35/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3472 - loss: 1.7577 - val_accuracy: 0.3753 - val_loss: 1.6736\n",
            "Epoch 36/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3528 - loss: 1.7491 - val_accuracy: 0.3792 - val_loss: 1.6666\n",
            "Epoch 37/75\n",
            "49/49 [==============================] - 5s 94ms/step - accuracy: 0.3511 - loss: 1.7488 - val_accuracy: 0.3821 - val_loss: 1.6647\n",
            "Epoch 38/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3542 - loss: 1.7431 - val_accuracy: 0.3847 - val_loss: 1.6613\n",
            "Epoch 39/75\n",
            "49/49 [==============================] - 11s 219ms/step - accuracy: 0.3517 - loss: 1.7390 - val_accuracy: 0.3877 - val_loss: 1.6567\n",
            "Epoch 40/75\n",
            "49/49 [==============================] - 4s 82ms/step - accuracy: 0.3585 - loss: 1.7315 - val_accuracy: 0.3865 - val_loss: 1.6591\n",
            "Epoch 41/75\n",
            "49/49 [==============================] - 5s 102ms/step - accuracy: 0.3572 - loss: 1.7298 - val_accuracy: 0.3925 - val_loss: 1.6408\n",
            "Epoch 42/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3611 - loss: 1.7214 - val_accuracy: 0.3940 - val_loss: 1.6351\n",
            "Epoch 43/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3629 - loss: 1.7219 - val_accuracy: 0.3941 - val_loss: 1.6337\n",
            "Epoch 44/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3622 - loss: 1.7168 - val_accuracy: 0.3981 - val_loss: 1.6252\n",
            "Epoch 45/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3671 - loss: 1.7125 - val_accuracy: 0.4018 - val_loss: 1.6216\n",
            "Epoch 46/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3661 - loss: 1.7104 - val_accuracy: 0.4032 - val_loss: 1.6153\n",
            "Epoch 47/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3666 - loss: 1.7060 - val_accuracy: 0.4064 - val_loss: 1.6095\n",
            "Epoch 48/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3729 - loss: 1.6993 - val_accuracy: 0.4050 - val_loss: 1.6082\n",
            "Epoch 49/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.3695 - loss: 1.6997 - val_accuracy: 0.4082 - val_loss: 1.6029\n",
            "Epoch 50/75\n",
            "49/49 [==============================] - 4s 82ms/step - accuracy: 0.3742 - loss: 1.6954 - val_accuracy: 0.4060 - val_loss: 1.6043\n",
            "Epoch 51/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3728 - loss: 1.6912 - val_accuracy: 0.4132 - val_loss: 1.5961\n",
            "Epoch 52/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3774 - loss: 1.6864 - val_accuracy: 0.4125 - val_loss: 1.5932\n",
            "Epoch 53/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3753 - loss: 1.6881 - val_accuracy: 0.4106 - val_loss: 1.5896\n",
            "Epoch 54/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3783 - loss: 1.6865 - val_accuracy: 0.4136 - val_loss: 1.5865\n",
            "Epoch 55/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3827 - loss: 1.6780 - val_accuracy: 0.4125 - val_loss: 1.5831\n",
            "Epoch 56/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3812 - loss: 1.6761 - val_accuracy: 0.4176 - val_loss: 1.5786\n",
            "Epoch 57/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3810 - loss: 1.6731 - val_accuracy: 0.4196 - val_loss: 1.5766\n",
            "Epoch 58/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3841 - loss: 1.6720 - val_accuracy: 0.4189 - val_loss: 1.5709\n",
            "Epoch 59/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3838 - loss: 1.6689 - val_accuracy: 0.4196 - val_loss: 1.5700\n",
            "Epoch 60/75\n",
            "49/49 [==============================] - 4s 80ms/step - accuracy: 0.3883 - loss: 1.6680 - val_accuracy: 0.4202 - val_loss: 1.5709\n",
            "Epoch 61/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.3864 - loss: 1.6675 - val_accuracy: 0.4203 - val_loss: 1.5663\n",
            "Epoch 62/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3884 - loss: 1.6640 - val_accuracy: 0.4237 - val_loss: 1.5637\n",
            "Epoch 63/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3856 - loss: 1.6606 - val_accuracy: 0.4241 - val_loss: 1.5632\n",
            "Epoch 64/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3912 - loss: 1.6565 - val_accuracy: 0.4243 - val_loss: 1.5620\n",
            "Epoch 65/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3941 - loss: 1.6530 - val_accuracy: 0.4258 - val_loss: 1.5572\n",
            "Epoch 66/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.3917 - loss: 1.6515 - val_accuracy: 0.4285 - val_loss: 1.5553\n",
            "Epoch 67/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.3962 - loss: 1.6449 - val_accuracy: 0.4305 - val_loss: 1.5540\n",
            "Epoch 68/75\n",
            "49/49 [==============================] - 4s 82ms/step - accuracy: 0.3961 - loss: 1.6471 - val_accuracy: 0.4267 - val_loss: 1.5559\n",
            "Epoch 69/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3990 - loss: 1.6381 - val_accuracy: 0.4309 - val_loss: 1.5480\n",
            "Epoch 70/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3978 - loss: 1.6449 - val_accuracy: 0.4321 - val_loss: 1.5471\n",
            "Epoch 71/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.3957 - loss: 1.6413 - val_accuracy: 0.4342 - val_loss: 1.5407\n",
            "Epoch 72/75\n",
            "49/49 [==============================] - 4s 79ms/step - accuracy: 0.3967 - loss: 1.6344 - val_accuracy: 0.4317 - val_loss: 1.5423\n",
            "Epoch 73/75\n",
            "49/49 [==============================] - 5s 94ms/step - accuracy: 0.4023 - loss: 1.6294 - val_accuracy: 0.4337 - val_loss: 1.5355\n",
            "Epoch 74/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.4008 - loss: 1.6315 - val_accuracy: 0.4373 - val_loss: 1.5324\n",
            "Epoch 75/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.4019 - loss: 1.6307 - val_accuracy: 0.4367 - val_loss: 1.5308\n",
            "Network takes 383.697 seconds to train\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2PXol7EltH0s",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Persist the trained weights locally, then copy them to the GCS bucket\n",
        "# so they survive the ephemeral Colab VM and can be reloaded later.\n",
        "model.save_weights(\"resnet_bn_only_adam.h5\")\n",
        "!gsutil -m cp -r resnet_bn_only_adam.h5 gs://batch_norm_tpu/"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "D-kcR7X1b3CO",
        "colab_type": "code",
        "outputId": "e1154f61-a24b-4416-ecae-d4e446a7fb53",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# Train the BN-only model again, this time with a ramp-up-then-decay LR\n",
        "# schedule applied via `lr_callback` (the run output below shows the LR\n",
        "# climbing from 1e-05 before decaying; assumed defined earlier — confirm).\n",
        "wandb.init(project=\"training-bn-only\", id=\"bn-only-adam-ramups\")\n",
        "\n",
        "# Rebuild a fresh model so this run does not inherit weights from the\n",
        "# previous constant-LR training run.\n",
        "with strategy.scope():\n",
        "    model = get_training_model(\"adam\")\n",
        "\n",
        "    # Freeze all non-BatchNorm layers (same setup as the earlier run).\n",
        "    for layer in model.layers:\n",
        "        if not isinstance(layer, tf.keras.layers.BatchNormalization):\n",
        "            if hasattr(layer, \"trainable\"):\n",
        "                layer.trainable=False\n",
        "    # Compile after toggling `trainable` so the freeze takes effect.\n",
        "    model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
        "    \n",
        "# Time the schedule-driven training for comparison with the constant-LR run.\n",
        "start = time.time()\n",
        "h = model.fit(train_ds,\n",
        "         validation_data=test_ds,\n",
        "         epochs=75,\n",
        "         callbacks=[WandbCallback(), lr_callback])\n",
        "end = time.time()\n",
        "wandb.log({\"training_time\": end - start})\n",
        "print(\"Network takes {:.3f} seconds to train\".format(end - start))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "\n",
              "                Logging results to <a href=\"https://wandb.com\" target=\"_blank\">Weights & Biases</a> <a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">(Documentation)</a>.<br/>\n",
              "                Project page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only</a><br/>\n",
              "                Run page: <a href=\"https://app.wandb.ai/sayakpaul/training-bn-only/runs/bn-only-adam-ramups\" target=\"_blank\">https://app.wandb.ai/sayakpaul/training-bn-only/runs/bn-only-adam-ramups</a><br/>\n",
              "            "
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        },
        {
          "output_type": "stream",
          "text": [
            "\n",
            "Epoch 00001: LearningRateScheduler reducing learning rate to 1e-05.\n",
            "Epoch 1/75\n",
            "49/49 [==============================] - 10s 208ms/step - accuracy: 0.0909 - loss: 2.3637 - val_accuracy: 0.0738 - val_loss: 2.3711 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00002: LearningRateScheduler reducing learning rate to 8.8e-05.\n",
            "Epoch 2/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.0914 - loss: 2.3580 - val_accuracy: 0.0945 - val_loss: 2.3008 - lr: 8.8000e-05\n",
            "\n",
            "Epoch 00003: LearningRateScheduler reducing learning rate to 0.000166.\n",
            "Epoch 3/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.0960 - loss: 2.3415 - val_accuracy: 0.1016 - val_loss: 2.2984 - lr: 1.6600e-04\n",
            "\n",
            "Epoch 00004: LearningRateScheduler reducing learning rate to 0.000244.\n",
            "Epoch 4/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1089 - loss: 2.3184 - val_accuracy: 0.1156 - val_loss: 2.2935 - lr: 2.4400e-04\n",
            "\n",
            "Epoch 00005: LearningRateScheduler reducing learning rate to 0.000322.\n",
            "Epoch 5/75\n",
            "49/49 [==============================] - 5s 102ms/step - accuracy: 0.1211 - loss: 2.2910 - val_accuracy: 0.1340 - val_loss: 2.2800 - lr: 3.2200e-04\n",
            "\n",
            "Epoch 00006: LearningRateScheduler reducing learning rate to 0.0004.\n",
            "Epoch 6/75\n",
            "49/49 [==============================] - 5s 100ms/step - accuracy: 0.1336 - loss: 2.2662 - val_accuracy: 0.1434 - val_loss: 2.2589 - lr: 4.0000e-04\n",
            "\n",
            "Epoch 00007: LearningRateScheduler reducing learning rate to 0.000322.\n",
            "Epoch 7/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1500 - loss: 2.2425 - val_accuracy: 0.1541 - val_loss: 2.2342 - lr: 3.2200e-04\n",
            "\n",
            "Epoch 00008: LearningRateScheduler reducing learning rate to 0.0002596000000000001.\n",
            "Epoch 8/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1604 - loss: 2.2282 - val_accuracy: 0.1688 - val_loss: 2.2115 - lr: 2.5960e-04\n",
            "\n",
            "Epoch 00009: LearningRateScheduler reducing learning rate to 0.00020968000000000004.\n",
            "Epoch 9/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1661 - loss: 2.2188 - val_accuracy: 0.1775 - val_loss: 2.1920 - lr: 2.0968e-04\n",
            "\n",
            "Epoch 00010: LearningRateScheduler reducing learning rate to 0.00016974400000000002.\n",
            "Epoch 10/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1736 - loss: 2.2094 - val_accuracy: 0.1875 - val_loss: 2.1776 - lr: 1.6974e-04\n",
            "\n",
            "Epoch 00011: LearningRateScheduler reducing learning rate to 0.00013779520000000003.\n",
            "Epoch 11/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1763 - loss: 2.2027 - val_accuracy: 0.1955 - val_loss: 2.1676 - lr: 1.3780e-04\n",
            "\n",
            "Epoch 00012: LearningRateScheduler reducing learning rate to 0.00011223616000000004.\n",
            "Epoch 12/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1785 - loss: 2.1971 - val_accuracy: 0.2000 - val_loss: 2.1605 - lr: 1.1224e-04\n",
            "\n",
            "Epoch 00013: LearningRateScheduler reducing learning rate to 9.178892800000003e-05.\n",
            "Epoch 13/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1806 - loss: 2.1947 - val_accuracy: 0.2071 - val_loss: 2.1548 - lr: 9.1789e-05\n",
            "\n",
            "Epoch 00014: LearningRateScheduler reducing learning rate to 7.543114240000003e-05.\n",
            "Epoch 14/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1839 - loss: 2.1898 - val_accuracy: 0.2082 - val_loss: 2.1505 - lr: 7.5431e-05\n",
            "\n",
            "Epoch 00015: LearningRateScheduler reducing learning rate to 6.234491392000002e-05.\n",
            "Epoch 15/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1824 - loss: 2.1884 - val_accuracy: 0.2112 - val_loss: 2.1473 - lr: 6.2345e-05\n",
            "\n",
            "Epoch 00016: LearningRateScheduler reducing learning rate to 5.1875931136000024e-05.\n",
            "Epoch 16/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1873 - loss: 2.1838 - val_accuracy: 0.2114 - val_loss: 2.1447 - lr: 5.1876e-05\n",
            "\n",
            "Epoch 00017: LearningRateScheduler reducing learning rate to 4.3500744908800015e-05.\n",
            "Epoch 17/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1866 - loss: 2.1824 - val_accuracy: 0.2130 - val_loss: 2.1427 - lr: 4.3501e-05\n",
            "\n",
            "Epoch 00018: LearningRateScheduler reducing learning rate to 3.6800595927040014e-05.\n",
            "Epoch 18/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1853 - loss: 2.1823 - val_accuracy: 0.2143 - val_loss: 2.1410 - lr: 3.6801e-05\n",
            "\n",
            "Epoch 00019: LearningRateScheduler reducing learning rate to 3.1440476741632015e-05.\n",
            "Epoch 19/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1877 - loss: 2.1823 - val_accuracy: 0.2148 - val_loss: 2.1396 - lr: 3.1440e-05\n",
            "\n",
            "Epoch 00020: LearningRateScheduler reducing learning rate to 2.7152381393305616e-05.\n",
            "Epoch 20/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1872 - loss: 2.1782 - val_accuracy: 0.2154 - val_loss: 2.1383 - lr: 2.7152e-05\n",
            "\n",
            "Epoch 00021: LearningRateScheduler reducing learning rate to 2.3721905114644494e-05.\n",
            "Epoch 21/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1898 - loss: 2.1775 - val_accuracy: 0.2144 - val_loss: 2.1370 - lr: 2.3722e-05\n",
            "\n",
            "Epoch 00022: LearningRateScheduler reducing learning rate to 2.0977524091715595e-05.\n",
            "Epoch 22/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1908 - loss: 2.1771 - val_accuracy: 0.2145 - val_loss: 2.1361 - lr: 2.0978e-05\n",
            "\n",
            "Epoch 00023: LearningRateScheduler reducing learning rate to 1.8782019273372477e-05.\n",
            "Epoch 23/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1906 - loss: 2.1760 - val_accuracy: 0.2143 - val_loss: 2.1354 - lr: 1.8782e-05\n",
            "\n",
            "Epoch 00024: LearningRateScheduler reducing learning rate to 1.702561541869798e-05.\n",
            "Epoch 24/75\n",
            "49/49 [==============================] - 5s 100ms/step - accuracy: 0.1908 - loss: 2.1759 - val_accuracy: 0.2150 - val_loss: 2.1346 - lr: 1.7026e-05\n",
            "\n",
            "Epoch 00025: LearningRateScheduler reducing learning rate to 1.5620492334958385e-05.\n",
            "Epoch 25/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1928 - loss: 2.1741 - val_accuracy: 0.2143 - val_loss: 2.1338 - lr: 1.5620e-05\n",
            "\n",
            "Epoch 00026: LearningRateScheduler reducing learning rate to 1.4496393867966709e-05.\n",
            "Epoch 26/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1937 - loss: 2.1740 - val_accuracy: 0.2156 - val_loss: 2.1333 - lr: 1.4496e-05\n",
            "\n",
            "Epoch 00027: LearningRateScheduler reducing learning rate to 1.3597115094373368e-05.\n",
            "Epoch 27/75\n",
            "49/49 [==============================] - 5s 101ms/step - accuracy: 0.1920 - loss: 2.1734 - val_accuracy: 0.2152 - val_loss: 2.1327 - lr: 1.3597e-05\n",
            "\n",
            "Epoch 00028: LearningRateScheduler reducing learning rate to 1.2877692075498695e-05.\n",
            "Epoch 28/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1932 - loss: 2.1698 - val_accuracy: 0.2162 - val_loss: 2.1321 - lr: 1.2878e-05\n",
            "\n",
            "Epoch 00029: LearningRateScheduler reducing learning rate to 1.2302153660398955e-05.\n",
            "Epoch 29/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1947 - loss: 2.1717 - val_accuracy: 0.2156 - val_loss: 2.1315 - lr: 1.2302e-05\n",
            "\n",
            "Epoch 00030: LearningRateScheduler reducing learning rate to 1.1841722928319164e-05.\n",
            "Epoch 30/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1934 - loss: 2.1728 - val_accuracy: 0.2156 - val_loss: 2.1311 - lr: 1.1842e-05\n",
            "\n",
            "Epoch 00031: LearningRateScheduler reducing learning rate to 1.1473378342655331e-05.\n",
            "Epoch 31/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1914 - loss: 2.1721 - val_accuracy: 0.2162 - val_loss: 2.1305 - lr: 1.1473e-05\n",
            "\n",
            "Epoch 00032: LearningRateScheduler reducing learning rate to 1.1178702674124267e-05.\n",
            "Epoch 32/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1946 - loss: 2.1686 - val_accuracy: 0.2171 - val_loss: 2.1298 - lr: 1.1179e-05\n",
            "\n",
            "Epoch 00033: LearningRateScheduler reducing learning rate to 1.0942962139299413e-05.\n",
            "Epoch 33/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1938 - loss: 2.1708 - val_accuracy: 0.2165 - val_loss: 2.1293 - lr: 1.0943e-05\n",
            "\n",
            "Epoch 00034: LearningRateScheduler reducing learning rate to 1.075436971143953e-05.\n",
            "Epoch 34/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1940 - loss: 2.1693 - val_accuracy: 0.2155 - val_loss: 2.1289 - lr: 1.0754e-05\n",
            "\n",
            "Epoch 00035: LearningRateScheduler reducing learning rate to 1.0603495769151624e-05.\n",
            "Epoch 35/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1931 - loss: 2.1695 - val_accuracy: 0.2161 - val_loss: 2.1286 - lr: 1.0603e-05\n",
            "\n",
            "Epoch 00036: LearningRateScheduler reducing learning rate to 1.04827966153213e-05.\n",
            "Epoch 36/75\n",
            "49/49 [==============================] - 12s 254ms/step - accuracy: 0.1944 - loss: 2.1706 - val_accuracy: 0.2162 - val_loss: 2.1283 - lr: 1.0483e-05\n",
            "\n",
            "Epoch 00037: LearningRateScheduler reducing learning rate to 1.038623729225704e-05.\n",
            "Epoch 37/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1944 - loss: 2.1693 - val_accuracy: 0.2171 - val_loss: 2.1278 - lr: 1.0386e-05\n",
            "\n",
            "Epoch 00038: LearningRateScheduler reducing learning rate to 1.0308989833805632e-05.\n",
            "Epoch 38/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1942 - loss: 2.1665 - val_accuracy: 0.2174 - val_loss: 2.1272 - lr: 1.0309e-05\n",
            "\n",
            "Epoch 00039: LearningRateScheduler reducing learning rate to 1.0247191867044507e-05.\n",
            "Epoch 39/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1929 - loss: 2.1687 - val_accuracy: 0.2167 - val_loss: 2.1267 - lr: 1.0247e-05\n",
            "\n",
            "Epoch 00040: LearningRateScheduler reducing learning rate to 1.0197753493635605e-05.\n",
            "Epoch 40/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1942 - loss: 2.1679 - val_accuracy: 0.2176 - val_loss: 2.1260 - lr: 1.0198e-05\n",
            "\n",
            "Epoch 00041: LearningRateScheduler reducing learning rate to 1.0158202794908484e-05.\n",
            "Epoch 41/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1943 - loss: 2.1682 - val_accuracy: 0.2170 - val_loss: 2.1256 - lr: 1.0158e-05\n",
            "\n",
            "Epoch 00042: LearningRateScheduler reducing learning rate to 1.0126562235926787e-05.\n",
            "Epoch 42/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.1950 - loss: 2.1645 - val_accuracy: 0.2181 - val_loss: 2.1252 - lr: 1.0127e-05\n",
            "\n",
            "Epoch 00043: LearningRateScheduler reducing learning rate to 1.010124978874143e-05.\n",
            "Epoch 43/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1996 - loss: 2.1653 - val_accuracy: 0.2180 - val_loss: 2.1246 - lr: 1.0101e-05\n",
            "\n",
            "Epoch 00044: LearningRateScheduler reducing learning rate to 1.0080999830993145e-05.\n",
            "Epoch 44/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1957 - loss: 2.1655 - val_accuracy: 0.2192 - val_loss: 2.1243 - lr: 1.0081e-05\n",
            "\n",
            "Epoch 00045: LearningRateScheduler reducing learning rate to 1.0064799864794516e-05.\n",
            "Epoch 45/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1978 - loss: 2.1647 - val_accuracy: 0.2185 - val_loss: 2.1238 - lr: 1.0065e-05\n",
            "\n",
            "Epoch 00046: LearningRateScheduler reducing learning rate to 1.0051839891835612e-05.\n",
            "Epoch 46/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1968 - loss: 2.1654 - val_accuracy: 0.2196 - val_loss: 2.1233 - lr: 1.0052e-05\n",
            "\n",
            "Epoch 00047: LearningRateScheduler reducing learning rate to 1.004147191346849e-05.\n",
            "Epoch 47/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1985 - loss: 2.1641 - val_accuracy: 0.2193 - val_loss: 2.1230 - lr: 1.0041e-05\n",
            "\n",
            "Epoch 00048: LearningRateScheduler reducing learning rate to 1.0033177530774792e-05.\n",
            "Epoch 48/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.1973 - loss: 2.1635 - val_accuracy: 0.2193 - val_loss: 2.1227 - lr: 1.0033e-05\n",
            "\n",
            "Epoch 00049: LearningRateScheduler reducing learning rate to 1.0026542024619834e-05.\n",
            "Epoch 49/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.1952 - loss: 2.1649 - val_accuracy: 0.2191 - val_loss: 2.1221 - lr: 1.0027e-05\n",
            "\n",
            "Epoch 00050: LearningRateScheduler reducing learning rate to 1.0021233619695867e-05.\n",
            "Epoch 50/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1951 - loss: 2.1628 - val_accuracy: 0.2187 - val_loss: 2.1218 - lr: 1.0021e-05\n",
            "\n",
            "Epoch 00051: LearningRateScheduler reducing learning rate to 1.0016986895756694e-05.\n",
            "Epoch 51/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1994 - loss: 2.1614 - val_accuracy: 0.2192 - val_loss: 2.1213 - lr: 1.0017e-05\n",
            "\n",
            "Epoch 00052: LearningRateScheduler reducing learning rate to 1.0013589516605356e-05.\n",
            "Epoch 52/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1970 - loss: 2.1638 - val_accuracy: 0.2213 - val_loss: 2.1208 - lr: 1.0014e-05\n",
            "\n",
            "Epoch 00053: LearningRateScheduler reducing learning rate to 1.0010871613284285e-05.\n",
            "Epoch 53/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.1966 - loss: 2.1628 - val_accuracy: 0.2212 - val_loss: 2.1203 - lr: 1.0011e-05\n",
            "\n",
            "Epoch 00054: LearningRateScheduler reducing learning rate to 1.0008697290627427e-05.\n",
            "Epoch 54/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1985 - loss: 2.1626 - val_accuracy: 0.2202 - val_loss: 2.1200 - lr: 1.0009e-05\n",
            "\n",
            "Epoch 00055: LearningRateScheduler reducing learning rate to 1.0006957832501943e-05.\n",
            "Epoch 55/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1988 - loss: 2.1620 - val_accuracy: 0.2205 - val_loss: 2.1195 - lr: 1.0007e-05\n",
            "\n",
            "Epoch 00056: LearningRateScheduler reducing learning rate to 1.0005566266001554e-05.\n",
            "Epoch 56/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1990 - loss: 2.1608 - val_accuracy: 0.2220 - val_loss: 2.1192 - lr: 1.0006e-05\n",
            "\n",
            "Epoch 00057: LearningRateScheduler reducing learning rate to 1.0004453012801243e-05.\n",
            "Epoch 57/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1955 - loss: 2.1619 - val_accuracy: 0.2202 - val_loss: 2.1186 - lr: 1.0004e-05\n",
            "\n",
            "Epoch 00058: LearningRateScheduler reducing learning rate to 1.0003562410240995e-05.\n",
            "Epoch 58/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1971 - loss: 2.1620 - val_accuracy: 0.2208 - val_loss: 2.1181 - lr: 1.0004e-05\n",
            "\n",
            "Epoch 00059: LearningRateScheduler reducing learning rate to 1.0002849928192796e-05.\n",
            "Epoch 59/75\n",
            "49/49 [==============================] - 5s 100ms/step - accuracy: 0.1966 - loss: 2.1602 - val_accuracy: 0.2212 - val_loss: 2.1177 - lr: 1.0003e-05\n",
            "\n",
            "Epoch 00060: LearningRateScheduler reducing learning rate to 1.0002279942554237e-05.\n",
            "Epoch 60/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2022 - loss: 2.1584 - val_accuracy: 0.2218 - val_loss: 2.1172 - lr: 1.0002e-05\n",
            "\n",
            "Epoch 00061: LearningRateScheduler reducing learning rate to 1.000182395404339e-05.\n",
            "Epoch 61/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1980 - loss: 2.1610 - val_accuracy: 0.2221 - val_loss: 2.1167 - lr: 1.0002e-05\n",
            "\n",
            "Epoch 00062: LearningRateScheduler reducing learning rate to 1.0001459163234711e-05.\n",
            "Epoch 62/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1989 - loss: 2.1595 - val_accuracy: 0.2221 - val_loss: 2.1162 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00063: LearningRateScheduler reducing learning rate to 1.000116733058777e-05.\n",
            "Epoch 63/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2029 - loss: 2.1576 - val_accuracy: 0.2211 - val_loss: 2.1156 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00064: LearningRateScheduler reducing learning rate to 1.0000933864470216e-05.\n",
            "Epoch 64/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.2015 - loss: 2.1580 - val_accuracy: 0.2222 - val_loss: 2.1153 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00065: LearningRateScheduler reducing learning rate to 1.0000747091576173e-05.\n",
            "Epoch 65/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1995 - loss: 2.1572 - val_accuracy: 0.2226 - val_loss: 2.1149 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00066: LearningRateScheduler reducing learning rate to 1.0000597673260939e-05.\n",
            "Epoch 66/75\n",
            "49/49 [==============================] - 5s 98ms/step - accuracy: 0.1973 - loss: 2.1592 - val_accuracy: 0.2233 - val_loss: 2.1143 - lr: 1.0001e-05\n",
            "\n",
            "Epoch 00067: LearningRateScheduler reducing learning rate to 1.0000478138608751e-05.\n",
            "Epoch 67/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.2049 - loss: 2.1569 - val_accuracy: 0.2227 - val_loss: 2.1138 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00068: LearningRateScheduler reducing learning rate to 1.0000382510887001e-05.\n",
            "Epoch 68/75\n",
            "49/49 [==============================] - 5s 99ms/step - accuracy: 0.2031 - loss: 2.1545 - val_accuracy: 0.2222 - val_loss: 2.1134 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00069: LearningRateScheduler reducing learning rate to 1.00003060087096e-05.\n",
            "Epoch 69/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2031 - loss: 2.1569 - val_accuracy: 0.2233 - val_loss: 2.1131 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00070: LearningRateScheduler reducing learning rate to 1.000024480696768e-05.\n",
            "Epoch 70/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.1989 - loss: 2.1558 - val_accuracy: 0.2240 - val_loss: 2.1125 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00071: LearningRateScheduler reducing learning rate to 1.0000195845574146e-05.\n",
            "Epoch 71/75\n",
            "49/49 [==============================] - 5s 95ms/step - accuracy: 0.2010 - loss: 2.1560 - val_accuracy: 0.2230 - val_loss: 2.1123 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00072: LearningRateScheduler reducing learning rate to 1.0000156676459317e-05.\n",
            "Epoch 72/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2013 - loss: 2.1570 - val_accuracy: 0.2225 - val_loss: 2.1116 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00073: LearningRateScheduler reducing learning rate to 1.0000125341167454e-05.\n",
            "Epoch 73/75\n",
            "49/49 [==============================] - 5s 97ms/step - accuracy: 0.1996 - loss: 2.1550 - val_accuracy: 0.2233 - val_loss: 2.1114 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00074: LearningRateScheduler reducing learning rate to 1.0000100272933963e-05.\n",
            "Epoch 74/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2037 - loss: 2.1532 - val_accuracy: 0.2246 - val_loss: 2.1108 - lr: 1.0000e-05\n",
            "\n",
            "Epoch 00075: LearningRateScheduler reducing learning rate to 1.000008021834717e-05.\n",
            "Epoch 75/75\n",
            "49/49 [==============================] - 5s 96ms/step - accuracy: 0.2025 - loss: 2.1533 - val_accuracy: 0.2241 - val_loss: 2.1106 - lr: 1.0000e-05\n",
            "Network takes 391.380 seconds to train\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "AGq92F3mtPOC",
        "colab_type": "code",
        "outputId": "f20a5278-3b29-47c9-9709-7257434e737e",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        }
      },
      "source": [
        "model.save_weights(\"resnet_bn_rampup_adam.h5\")\n",
        "!gsutil -m cp -r resnet_bn_rampup_adam.h5 gs://batch_norm_tpu/"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Copying file://resnet_bn_rampup_adam.h5 [Content-Type=application/octet-stream]...\n",
            "/ [1/1 files][  2.4 MiB/  2.4 MiB] 100% Done                                    \n",
            "Operation completed over 1 objects/2.4 MiB.                                      \n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}