{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copyright 2019 Google LLC\n",
    "#\n",
    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "# you may not use this file except in compliance with the License.\n",
    "# You may obtain a copy of the License at\n",
    "#\n",
    "#     https://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Modern CNN Architecture - Lab 2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In this lab, you will attempt to find an improvement on a mini-ResNet for CIFAR-10.\n",
    "\n",
    "Below is a composable \"class\" based version for building ResNet networks. Spend a few moments looking at the structure and get familiar."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.keras import Model, Input\n",
    "from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, BatchNormalization, ReLU\n",
    "from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Add\n",
    "\n",
    "class ResNetV2(object):\n",
    "    \"\"\" Construct a Residual Convolutional Neural Network V2 (pre-activation: BN-ReLU-Conv) \"\"\"\n",
    "    # Meta-parameter: per-depth list of groups: (number of filters, number of blocks)\n",
    "    groups = { 50 : [ (64, 3), (128, 4), (256, 6),  (512, 3) ],           # ResNet50\n",
    "               101: [ (64, 3), (128, 4), (256, 23), (512, 3) ],           # ResNet101\n",
    "               152: [ (64, 3), (128, 8), (256, 36), (512, 3) ]            # ResNet152\n",
    "             }\n",
    "    _model = None\n",
    "    # Default kernel initializer for every convolutional and dense layer\n",
    "    init_weights = 'he_normal'\n",
    "\n",
    "    def __init__(self, n_layers, input_shape=(224, 224, 3), n_classes=1000):\n",
    "        \"\"\" Construct a Residual Convolutional Neural Network V2\n",
    "            n_layers   : number of layers (one of 50, 101, 152)\n",
    "            input_shape: input shape\n",
    "            n_classes  : number of output classes\n",
    "        \"\"\"\n",
    "        # Validate against the supported depths (the keys of the groups table)\n",
    "        if n_layers not in self.groups:\n",
    "            raise Exception(\"ResNet: Invalid value for n_layers\")\n",
    "\n",
    "        # The input tensor\n",
    "        inputs = Input(input_shape)\n",
    "\n",
    "        # The stem convolutional group\n",
    "        x = self.stem(inputs)\n",
    "\n",
    "        # The learner\n",
    "        x = self.learner(x, self.groups[n_layers])\n",
    "\n",
    "        # The classifier for n_classes classes\n",
    "        outputs = self.classifier(x, n_classes)\n",
    "\n",
    "        # Instantiate the Model\n",
    "        self._model = Model(inputs, outputs)\n",
    "\n",
    "    @property\n",
    "    def model(self):\n",
    "        \"\"\" The underlying Keras Model \"\"\"\n",
    "        return self._model\n",
    "\n",
    "    @model.setter\n",
    "    def model(self, _model):\n",
    "        self._model = _model\n",
    "\n",
    "    def stem(self, inputs):\n",
    "        \"\"\" Construct the Stem Convolutional Group\n",
    "            inputs : the input tensor\n",
    "        \"\"\"\n",
    "        # The 224x224 images are zero padded (black - no signal) to be 230x230 images prior to the first convolution\n",
    "        x = ZeroPadding2D(padding=(3, 3))(inputs)\n",
    "\n",
    "        # First Convolutional layer uses large (coarse) filter\n",
    "        x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', use_bias=False, kernel_initializer=self.init_weights)(x)\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "\n",
    "        # Pooled feature maps will be reduced by 75%\n",
    "        x = ZeroPadding2D(padding=(1, 1))(x)\n",
    "        x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n",
    "        return x\n",
    "\n",
    "    def learner(self, x, groups):\n",
    "        \"\"\" Construct the Learner\n",
    "            x     : input to the learner\n",
    "            groups: list of groups: (number of filters, number of blocks)\n",
    "        \"\"\"\n",
    "        # Work on a copy: the caller passes the shared class-level list, and\n",
    "        # popping from it directly would corrupt ResNetV2.groups for any\n",
    "        # subsequent instantiation.\n",
    "        groups = list(groups)\n",
    "\n",
    "        # First Residual Block Group (not strided)\n",
    "        n_filters, n_blocks = groups.pop(0)\n",
    "        x = ResNetV2.group(x, n_filters, n_blocks, strides=(1, 1))\n",
    "\n",
    "        # Remaining Residual Block Groups (strided)\n",
    "        for n_filters, n_blocks in groups:\n",
    "            x = ResNetV2.group(x, n_filters, n_blocks)\n",
    "        return x\n",
    "\n",
    "    @staticmethod\n",
    "    def group(x, n_filters, n_blocks, strides=(2, 2), init_weights=None):\n",
    "        \"\"\" Construct a Residual Group\n",
    "            x           : input into the group\n",
    "            n_filters   : number of filters for the group\n",
    "            n_blocks    : number of residual blocks with identity link\n",
    "            strides     : whether the projection block is a strided convolution\n",
    "            init_weights: kernel initializer (None -> ResNetV2.init_weights)\n",
    "        \"\"\"\n",
    "        # Projection block quadruples the number of output filters to fit the group\n",
    "        x = ResNetV2.projection_block(x, n_filters, strides=strides, init_weights=init_weights)\n",
    "\n",
    "        # Identity residual blocks (the group totals n_blocks + 1 blocks,\n",
    "        # counting the projection block above)\n",
    "        for _ in range(n_blocks):\n",
    "            x = ResNetV2.identity_block(x, n_filters, init_weights=init_weights)\n",
    "        return x\n",
    "\n",
    "    @staticmethod\n",
    "    def identity_block(x, n_filters, init_weights=None):\n",
    "        \"\"\" Construct a Bottleneck Residual Block with Identity Link\n",
    "            x           : input into the block\n",
    "            n_filters   : number of filters\n",
    "            init_weights: kernel initializer (None -> ResNetV2.init_weights)\n",
    "        \"\"\"\n",
    "        if init_weights is None:\n",
    "            init_weights = ResNetV2.init_weights\n",
    "\n",
    "        # Save input vector (feature maps) for the identity link\n",
    "        shortcut = x\n",
    "\n",
    "        ## Construct the 1x1, 3x3, 1x1 convolution block\n",
    "\n",
    "        # Dimensionality reduction\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "        x = Conv2D(n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer=init_weights)(x)\n",
    "\n",
    "        # Bottleneck layer\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "        x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding=\"same\", use_bias=False, kernel_initializer=init_weights)(x)\n",
    "\n",
    "        # Dimensionality restoration - increase the number of output filters by 4X\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "        x = Conv2D(n_filters * 4, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer=init_weights)(x)\n",
    "\n",
    "        # Add the identity link (input) to the output of the residual block\n",
    "        x = Add()([shortcut, x])\n",
    "        return x\n",
    "\n",
    "    @staticmethod\n",
    "    def projection_block(x, n_filters, strides=(2,2), init_weights=None):\n",
    "        \"\"\" Construct a Bottleneck Residual Block of Convolutions with Projection Shortcut\n",
    "            Increase the number of filters by 4X\n",
    "            x           : input into the block\n",
    "            n_filters   : number of filters\n",
    "            strides     : whether the first convolution is strided\n",
    "            init_weights: kernel initializer (None -> ResNetV2.init_weights)\n",
    "        \"\"\"\n",
    "        # Resolve the default initializer; previously this parameter was\n",
    "        # silently ignored and 'he_normal' was hard-coded in each layer.\n",
    "        if init_weights is None:\n",
    "            init_weights = ResNetV2.init_weights\n",
    "\n",
    "        # Construct the projection shortcut\n",
    "        # Increase filters by 4X to match shape when added to output of block\n",
    "        shortcut = BatchNormalization()(x)\n",
    "        shortcut = Conv2D(4 * n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer=init_weights)(shortcut)\n",
    "\n",
    "        ## Construct the 1x1, 3x3, 1x1 convolution block\n",
    "\n",
    "        # Dimensionality reduction\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "        x = Conv2D(n_filters, (1, 1), strides=(1,1), use_bias=False, kernel_initializer=init_weights)(x)\n",
    "\n",
    "        # Bottleneck layer\n",
    "        # Feature pooling when strides=(2, 2)\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "        x = Conv2D(n_filters, (3, 3), strides=strides, padding='same', use_bias=False, kernel_initializer=init_weights)(x)\n",
    "\n",
    "        # Dimensionality restoration - increase the number of filters by 4X\n",
    "        x = BatchNormalization()(x)\n",
    "        x = ReLU()(x)\n",
    "        x = Conv2D(4 * n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer=init_weights)(x)\n",
    "\n",
    "        # Add the projection shortcut to the output of the residual block\n",
    "        x = Add()([x, shortcut])\n",
    "        return x\n",
    "\n",
    "    def classifier(self, x, n_classes):\n",
    "        \"\"\" Construct the Classifier Group\n",
    "            x         : input to the classifier\n",
    "            n_classes : number of output classes\n",
    "        \"\"\"\n",
    "        # Pool at the end of all the convolutional residual blocks\n",
    "        x = GlobalAveragePooling2D()(x)\n",
    "\n",
    "        # Final Dense Outputting Layer for the outputs\n",
    "        outputs = Dense(n_classes, activation='softmax', kernel_initializer=self.init_weights)(x)\n",
    "        return outputs\n",
    "\n",
    "# Example\n",
    "# resnet = ResNetV2(50)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Starting mini-ResNet\n",
    "\n",
    "Below is a mini-ResNet I wrote for CIFAR-10. Notice how at the bottleneck layer the feature maps are 3 x 3 (max pooling). \n",
    "\n",
    "#### Model Summary\n",
    "\n",
    "```\n",
    "REMOVED for brevity ...\n",
    "batch_normalization_783 (BatchN (None, 16, 16, 8)    32          add_259[0][0]                    \n",
    "__________________________________________________________________________________________________\n",
    "conv2d_789 (Conv2D)             (None, 8, 8, 1024)   262144      re_lu_782[0][0]                  \n",
    "__________________________________________________________________________________________________\n",
    "conv2d_786 (Conv2D)             (None, 8, 8, 1024)   8192        batch_normalization_783[0][0]    \n",
    "__________________________________________________________________________________________________\n",
    "add_260 (Add)                   (None, 8, 8, 1024)   0           conv2d_789[0][0]                 \n",
    "                                                                 conv2d_786[0][0]                 \n",
    "__________________________________________________________________________________________________\n",
    "batch_normalization_787 (BatchN (None, 8, 8, 1024)   4096        add_260[0][0]                    \n",
    "__________________________________________________________________________________________________\n",
    "re_lu_783 (ReLU)                (None, 8, 8, 1024)   0           batch_normalization_787[0][0]    \n",
    "__________________________________________________________________________________________________\n",
    "conv2d_790 (Conv2D)             (None, 8, 8, 256)    262144      re_lu_783[0][0]                  \n",
    "__________________________________________________________________________________________________\n",
    "batch_normalization_788 (BatchN (None, 8, 8, 256)    1024        conv2d_790[0][0]                 \n",
    "__________________________________________________________________________________________________\n",
    "re_lu_784 (ReLU)                (None, 8, 8, 256)    0           batch_normalization_788[0][0]    \n",
    "__________________________________________________________________________________________________\n",
    "conv2d_791 (Conv2D)             (None, 8, 8, 256)    589824      re_lu_784[0][0]                  \n",
    "__________________________________________________________________________________________________\n",
    "batch_normalization_789 (BatchN (None, 8, 8, 256)    1024        conv2d_791[0][0]                 \n",
    "__________________________________________________________________________________________________\n",
    "re_lu_785 (ReLU)                (None, 8, 8, 256)    0           batch_normalization_789[0][0]    \n",
    "__________________________________________________________________________________________________\n",
    "conv2d_792 (Conv2D)             (None, 8, 8, 1024)   262144      re_lu_785[0][0]                  \n",
    "__________________________________________________________________________________________________\n",
    "add_261 (Add)                   (None, 8, 8, 1024)   0           add_260[0][0]                    \n",
    "                                                                 conv2d_792[0][0]                 \n",
    "__________________________________________________________________________________________________\n",
    "flatten_1 (Flatten)             (None, 65536)        0           add_261[0][0]                    \n",
    "__________________________________________________________________________________________________\n",
    "dense_1 (Dense)                 (None, 10)           655370      flatten_1[0][0]                  \n",
    "==================================================================================================\n",
    "Total params: 2,656,334\n",
    "Trainable params: 2,648,998\n",
    "Non-trainable params: 7,336\n",
    "```\n",
    "\n",
    "#### Training\n",
    "\n",
    "Below is the results for training for 10 epochs.\n",
    "```\n",
    "Train on 45000 samples, validate on 5000 samples\n",
    "Epoch 1/10\n",
    "45000/45000 [==============================] - 1229s 27ms/sample - loss: 4.3040 - acc: 0.1834 - val_loss: 2.1594 - val_acc: 0.2208\n",
    "Epoch 2/10\n",
    "45000/45000 [==============================] - 1029s 23ms/sample - loss: 2.0595 - acc: 0.2479 - val_loss: 1.9784 - val_acc: 0.2804\n",
    "Epoch 3/10\n",
    "45000/45000 [==============================] - 1144s 25ms/sample - loss: 1.9655 - acc: 0.2876 - val_loss: 1.9719 - val_acc: 0.2832\n",
    "Epoch 4/10\n",
    "45000/45000 [==============================] - 1149s 25ms/sample - loss: 1.8521 - acc: 0.3316 - val_loss: 1.7835 - val_acc: 0.3534\n",
    "Epoch 5/10\n",
    "45000/45000 [==============================] - 1227s 27ms/sample - loss: 1.7317 - acc: 0.3791 - val_loss: 1.7436 - val_acc: 0.3712\n",
    "Epoch 6/10\n",
    "45000/45000 [==============================] - 1138s 25ms/sample - loss: 1.6158 - acc: 0.4204 - val_loss: 1.6352 - val_acc: 0.4106\n",
    "Epoch 7/10\n",
    "45000/45000 [==============================] - 1570s 35ms/sample - loss: 1.4964 - acc: 0.4667 - val_loss: 1.4699 - val_acc: 0.4772\n",
    "Epoch 8/10\n",
    "45000/45000 [==============================] - 1148s 26ms/sample - loss: 1.3796 - acc: 0.5071 - val_loss: 1.3872 - val_acc: 0.5066\n",
    "Epoch 9/10\n",
    "45000/45000 [==============================] - 1189s 26ms/sample - loss: 1.2626 - acc: 0.5513 - val_loss: 1.3557 - val_acc: 0.5160\n",
    "Epoch 10/10\n",
    "45000/45000 [==============================] - 1134s 25ms/sample - loss: 1.1348 - acc: 0.6020 - val_loss: 1.3638 - val_acc: 0.5306\n",
    "```\n",
    "\n",
    "#### Try to Improve\n",
    "\n",
    "How could we improve this? \n",
    "\n",
    "1. Perhaps adding regularization (dropout) and replacing Flatten with GlobalAveragePooling2D?\n",
    "\n",
    "2. Perhaps reduce the number of filters and add another ResNet group?\n",
    "\n",
    "3. Perhaps handcraft a different configuration for the second group?\n",
    "\n",
    "4. Perhaps concatenate the output from the stem convolution to the output of the last group (but you will have to make the feature maps the same size)?\n",
    "\n",
    "5. Think of your own idea?\n",
    "\n",
    "If this is a classroom setting, we will split into 4 teams and each team will use a different approach.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Make mini-ResNetV1 for CIFAR-10\n",
    "from tensorflow.keras import Input, Model\n",
    "from tensorflow.keras.layers import Conv2D, Flatten, Dense\n",
    "\n",
    "# Stem\n",
    "inputs = Input((32, 32, 3))\n",
    "x = Conv2D(32, (3, 3), strides=1, padding='same', activation='relu')(inputs)\n",
    "\n",
    "# Learner\n",
    "# Residual group: 2 blocks, 128 filters\n",
    "# Residual block with projection, 256 filters\n",
    "# Residual block with identity, 256 filters\n",
    "# NOTE(review): the signature is group(x, n_filters, n_blocks), so the call\n",
    "# below builds n_filters=2, n_blocks=128 (128 narrow identity blocks), not\n",
    "# the \"2 blocks, 128 filters\" described above. The printed model summary\n",
    "# in the markdown (8-channel 16x16 feature maps) matches the call as\n",
    "# written, so confirm intent before changing it to ResNetV2.group(x, 128, 2).\n",
    "x = ResNetV2.group(x, 2, 128)\n",
    "x = ResNetV2.projection_block(x, 256)\n",
    "x = ResNetV2.identity_block(x, 256)\n",
    "\n",
    "# Classifier\n",
    "# Flatten on the 8x8x1024 feature maps yields a 65,536-wide Dense input\n",
    "# (see the summary above) - one of the suggested improvement targets.\n",
    "x = Flatten()(x)\n",
    "outputs = Dense(10, activation='softmax')(x)\n",
    "model = Model(inputs, outputs)\n",
    "model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load CIFAR-10 and scale the uint8 pixel values into [0, 1].\n",
    "from tensorflow.keras.datasets import cifar10\n",
    "import numpy as np\n",
    "\n",
    "def normalize(images):\n",
    "    \"\"\" Scale uint8 pixel values into float32 values in [0, 1]. \"\"\"\n",
    "    return (images / 255.0).astype(np.float32)\n",
    "\n",
    "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
    "x_train = normalize(x_train)\n",
    "x_test = normalize(x_test)\n",
    "\n",
    "# Train for 10 epochs, holding out 10% of the training set for validation.\n",
    "model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.1, verbose=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
