{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Implementing Different Layers\n",
    "\n",
    "\n",
    "We will illustrate how to use different types of layers in TensorFlow\n",
    "\n",
    "The layers of interest are:\n",
    "1. Convolutional Layer\n",
    "2. Activation Layer \n",
    "3. Max-Pool Layer\n",
    "4. Fully Connected Layer\n",
    "\n",
    "We will generate two different data sets for this script, a 1-D data set (row of data) and a 2-D data set (similar to picture)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "import csv\n",
    "import os\n",
    "import random\n",
     "import numpy as np\n",
    "from tensorflow.python.framework import ops\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```\n",
    "#---------------------------------------------------|\n",
    "#-------------------1D-data-------------------------|\n",
    "#---------------------------------------------------|\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Create graph session \n",
    "ops.reset_default_graph()\n",
    "sess = tf.Session()\n",
    "\n",
    "# parameters for the run\n",
    "data_size = 25\n",
    "conv_size = 5\n",
    "maxpool_size = 5\n",
    "stride_size = 1\n",
    "\n",
    "# ensure reproducibility\n",
    "seed=13\n",
    "np.random.seed(seed)\n",
    "tf.set_random_seed(seed)\n",
    "\n",
    "# Generate 1D data\n",
    "data_1d = np.random.normal(size=data_size)\n",
    "\n",
    "# Placeholder\n",
    "x_input_1d = tf.placeholder(dtype=tf.float32, shape=[data_size])\n",
    "\n",
    "#--------Convolution--------\n",
    "def conv_layer_1d(input_1d, my_filter,stride):\n",
    "    # TensorFlow's 'conv2d()' function only works with 4D arrays:\n",
     "    # [batch#, height, width, channels]: we have 1 batch, and\n",
     "    # height = 1, but width = the length of the input, and 1 channel.\n",
    "    # So next we create the 4D array by inserting dimension 1's.\n",
    "    input_2d = tf.expand_dims(input_1d, 0)\n",
    "    input_3d = tf.expand_dims(input_2d, 0)\n",
    "    input_4d = tf.expand_dims(input_3d, 3)\n",
    "    # Perform convolution with stride = 1, if we wanted to increase the stride,\n",
    "    # to say '2', then strides=[1,1,2,1]\n",
    "    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, strides=[1,1,stride,1], padding=\"VALID\")\n",
    "    # Get rid of extra dimensions\n",
    "    conv_output_1d = tf.squeeze(convolution_output)\n",
    "    return(conv_output_1d)\n",
    "\n",
    "# Create filter for convolution.\n",
    "my_filter = tf.Variable(tf.random_normal(shape=[1,conv_size,1,1]))\n",
    "# Create convolution layer\n",
    "my_convolution_output = conv_layer_1d(x_input_1d, my_filter,stride=stride_size)\n",
    "\n",
    "#--------Activation--------\n",
    "def activation(input_1d):\n",
    "    return(tf.nn.relu(input_1d))\n",
    "\n",
    "# Create activation layer\n",
    "my_activation_output = activation(my_convolution_output)\n",
    "\n",
    "#--------Max Pool--------\n",
    "def max_pool(input_1d, width,stride):\n",
    "    # Just like 'conv2d()' above, max_pool() works with 4D arrays.\n",
     "    # [batch_size=1, height=1, width=num_input, channels=1]\n",
    "    input_2d = tf.expand_dims(input_1d, 0)\n",
    "    input_3d = tf.expand_dims(input_2d, 0)\n",
    "    input_4d = tf.expand_dims(input_3d, 3)\n",
    "    # Perform the max pooling with strides = [1,1,1,1]\n",
    "    # If we wanted to increase the stride on our data dimension, say by\n",
    "    # a factor of '2', we put strides = [1, 1, 2, 1]\n",
    "    # We will also need to specify the width of the max-window ('width')\n",
    "    pool_output = tf.nn.max_pool(input_4d, ksize=[1, 1, width, 1],\n",
    "                                 strides=[1, 1, stride, 1],\n",
    "                                 padding='VALID')\n",
    "    # Get rid of extra dimensions\n",
    "    pool_output_1d = tf.squeeze(pool_output)\n",
    "    return(pool_output_1d)\n",
    "\n",
    "my_maxpool_output = max_pool(my_activation_output, width=maxpool_size,stride=stride_size)\n",
    "\n",
    "#--------Fully Connected--------\n",
    "def fully_connected(input_layer, num_outputs):\n",
    "    # First we find the needed shape of the multiplication weight matrix:\n",
    "    # The dimension will be (length of input) by (num_outputs)\n",
    "    weight_shape = tf.squeeze(tf.stack([tf.shape(input_layer),[num_outputs]]))\n",
    "    # Initialize such weight\n",
    "    weight = tf.random_normal(weight_shape, stddev=0.1)\n",
    "    # Initialize the bias\n",
    "    bias = tf.random_normal(shape=[num_outputs])\n",
    "    # Make the 1D input array into a 2D array for matrix multiplication\n",
    "    input_layer_2d = tf.expand_dims(input_layer, 0)\n",
    "    # Perform the matrix multiplication and add the bias\n",
    "    full_output = tf.add(tf.matmul(input_layer_2d, weight), bias)\n",
    "    # Get rid of extra dimensions\n",
    "    full_output_1d = tf.squeeze(full_output)\n",
    "    return(full_output_1d)\n",
    "\n",
    "my_full_output = fully_connected(my_maxpool_output, 5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ">>>> 1D Data <<<<\n",
      "Input = array of length 25\n",
      "Convolution w/ filter, length = 5, stride size = 1, results in an array of length 21:\n",
      "[-2.63576341 -1.11550486 -0.95571411 -1.69670296 -0.35699379  0.62266493\n",
      "  4.43316031  2.01364899  1.33044648 -2.30629659 -0.82916248 -2.63594174\n",
      "  0.76669347 -2.46465087 -2.2855041   1.49780679  1.6960566   1.48557389\n",
      " -2.79799461  1.18149185  1.42146575]\n",
      "\n",
      "Input = above array of length 21\n",
      "ReLU element wise returns an array of length 21:\n",
      "[ 0.          0.          0.          0.          0.          0.62266493\n",
      "  4.43316031  2.01364899  1.33044648  0.          0.          0.\n",
      "  0.76669347  0.          0.          1.49780679  1.6960566   1.48557389\n",
      "  0.          1.18149185  1.42146575]\n",
      "\n",
      "Input = above array of length 21\n",
      "MaxPool, window length = 5, stride size = 1, results in the array of length 17\n",
      "[ 0.          0.62266493  4.43316031  4.43316031  4.43316031  4.43316031\n",
      "  4.43316031  2.01364899  1.33044648  0.76669347  0.76669347  1.49780679\n",
      "  1.6960566   1.6960566   1.6960566   1.6960566   1.6960566 ]\n",
      "\n",
      "Input = above array of length 17\n",
       "Fully connected layer with 5 outputs:\n",
      "[ 1.71536076 -0.72340977 -1.22485089 -2.5412786  -0.16338301]\n"
     ]
    }
   ],
   "source": [
    "# Run graph\n",
    "# Initialize Variables\n",
    "init = tf.global_variables_initializer()\n",
    "sess.run(init)\n",
    "\n",
    "feed_dict = {x_input_1d: data_1d}\n",
    "\n",
    "print('>>>> 1D Data <<<<')\n",
    "\n",
    "# Convolution Output\n",
    "print('Input = array of length %d' % (x_input_1d.shape.as_list()[0]))\n",
    "print('Convolution w/ filter, length = %d, stride size = %d, results in an array of length %d:' % \n",
    "      (conv_size,stride_size,my_convolution_output.shape.as_list()[0]))\n",
    "print(sess.run(my_convolution_output, feed_dict=feed_dict))\n",
    "\n",
    "# Activation Output\n",
    "print('\\nInput = above array of length %d' % (my_convolution_output.shape.as_list()[0]))\n",
    "print('ReLU element wise returns an array of length %d:' % (my_activation_output.shape.as_list()[0]))\n",
    "print(sess.run(my_activation_output, feed_dict=feed_dict))\n",
    "\n",
    "# Max Pool Output\n",
    "print('\\nInput = above array of length %d' % (my_activation_output.shape.as_list()[0]))\n",
    "print('MaxPool, window length = %d, stride size = %d, results in the array of length %d' %\n",
    "     (maxpool_size,stride_size,my_maxpool_output.shape.as_list()[0]))\n",
    "print(sess.run(my_maxpool_output, feed_dict=feed_dict))\n",
    "\n",
    "# Fully Connected Output\n",
    "print('\\nInput = above array of length %d' % (my_maxpool_output.shape.as_list()[0]))\n",
     "print('Fully connected layer with %d outputs:' % \n",
    "      (my_full_output.shape.as_list()[0]))\n",
    "print(sess.run(my_full_output, feed_dict=feed_dict))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "```\n",
    "#---------------------------------------------------|\n",
    "#-------------------2D-data-------------------------|\n",
    "#---------------------------------------------------|\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Reset Graph\n",
    "ops.reset_default_graph()\n",
    "sess = tf.Session()\n",
    "\n",
    "# parameters for the run\n",
    "row_size = 10\n",
    "col_size = 10\n",
    "conv_size = 2\n",
    "conv_stride_size = 2\n",
    "maxpool_size = 2\n",
    "maxpool_stride_size = 1\n",
    "\n",
    "\n",
    "# ensure reproducibility\n",
    "seed=13\n",
    "np.random.seed(seed)\n",
    "tf.set_random_seed(seed)\n",
    "\n",
    "#Generate 2D data\n",
    "data_size = [row_size,col_size]\n",
    "data_2d = np.random.normal(size=data_size)\n",
    "\n",
    "#--------Placeholder--------\n",
    "x_input_2d = tf.placeholder(dtype=tf.float32, shape=data_size)\n",
    "\n",
    "# Convolution\n",
    "def conv_layer_2d(input_2d, my_filter,stride_size):\n",
    "    # TensorFlow's 'conv2d()' function only works with 4D arrays:\n",
    "    # [batch#, width, height, channels], we have 1 batch, and\n",
    "    # 1 channel, but we do have width AND height this time.\n",
    "    # So next we create the 4D array by inserting dimension 1's.\n",
    "    input_3d = tf.expand_dims(input_2d, 0)\n",
    "    input_4d = tf.expand_dims(input_3d, 3)\n",
    "    # Note the stride difference below!\n",
    "    convolution_output = tf.nn.conv2d(input_4d, filter=my_filter, \n",
    "                                      strides=[1,stride_size,stride_size,1], padding=\"VALID\")\n",
    "    # Get rid of unnecessary dimensions\n",
    "    conv_output_2d = tf.squeeze(convolution_output)\n",
    "    return(conv_output_2d)\n",
    "\n",
    "# Create Convolutional Filter\n",
    "my_filter = tf.Variable(tf.random_normal(shape=[conv_size,conv_size,1,1]))\n",
    "# Create Convolutional Layer\n",
    "my_convolution_output = conv_layer_2d(x_input_2d, my_filter,stride_size=conv_stride_size)\n",
    "\n",
    "#--------Activation--------\n",
    "def activation(input_1d):\n",
    "    return(tf.nn.relu(input_1d))\n",
    "\n",
    "# Create Activation Layer\n",
    "my_activation_output = activation(my_convolution_output)\n",
    "\n",
    "#--------Max Pool--------\n",
    "def max_pool(input_2d, width, height,stride):\n",
    "    # Just like 'conv2d()' above, max_pool() works with 4D arrays.\n",
    "    # [batch_size=1, width=given, height=given, channels=1]\n",
    "    input_3d = tf.expand_dims(input_2d, 0)\n",
    "    input_4d = tf.expand_dims(input_3d, 3)\n",
    "    # Perform the max pooling with strides = [1,1,1,1]\n",
    "    # If we wanted to increase the stride on our data dimension, say by\n",
    "    # a factor of '2', we put strides = [1, 2, 2, 1]\n",
    "    pool_output = tf.nn.max_pool(input_4d, ksize=[1, height, width, 1],\n",
    "                                 strides=[1, stride, stride, 1],\n",
    "                                 padding='VALID')\n",
    "    # Get rid of unnecessary dimensions\n",
    "    pool_output_2d = tf.squeeze(pool_output)\n",
    "    return(pool_output_2d)\n",
    "\n",
    "# Create Max-Pool Layer\n",
    "my_maxpool_output = max_pool(my_activation_output, \n",
    "                             width=maxpool_size, height=maxpool_size,stride=maxpool_stride_size)\n",
    "\n",
    "\n",
    "#--------Fully Connected--------\n",
    "def fully_connected(input_layer, num_outputs):\n",
     "    # In order to connect our whole W by H 2D array, we first flatten it out to\n",
    "    # a W times H 1D array.\n",
    "    flat_input = tf.reshape(input_layer, [-1])\n",
    "    # We then find out how long it is, and create an array for the shape of\n",
    "    # the multiplication weight = (WxH) by (num_outputs)\n",
    "    weight_shape = tf.squeeze(tf.stack([tf.shape(flat_input),[num_outputs]]))\n",
    "    # Initialize the weight\n",
    "    weight = tf.random_normal(weight_shape, stddev=0.1)\n",
    "    # Initialize the bias\n",
    "    bias = tf.random_normal(shape=[num_outputs])\n",
    "    # Now make the flat 1D array into a 2D array for multiplication\n",
    "    input_2d = tf.expand_dims(flat_input, 0)\n",
    "    # Multiply and add the bias\n",
    "    full_output = tf.add(tf.matmul(input_2d, weight), bias)\n",
    "    # Get rid of extra dimension\n",
    "    full_output_2d = tf.squeeze(full_output)\n",
    "    return(full_output_2d)\n",
    "\n",
    "# Create Fully Connected Layer\n",
    "my_full_output = fully_connected(my_maxpool_output, 5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ">>>> 2D Data <<<<\n",
      "Input = [10, 10] array\n",
      "[2, 2] Convolution, stride size = [2, 2] , results in the [5, 5] array\n",
      "[[ 0.14431179  0.72783369  1.51149166 -1.28099763  1.78439188]\n",
      " [-2.54503059  0.76156765 -0.51650006  0.77131093  0.37542343]\n",
      " [ 0.49345911  0.01592223  0.38653135 -1.47997665  0.6952765 ]\n",
      " [-0.34617192 -2.53189754 -0.9525758  -1.4357065   0.66257358]\n",
      " [-1.98540258  0.34398788  2.53760481 -0.86784822 -0.3100495 ]]\n",
      "\n",
      "Input = the above [5, 5] array\n",
      "ReLU element wise returns the [5, 5] array\n",
      "[[ 0.14431179  0.72783369  1.51149166  0.          1.78439188]\n",
      " [ 0.          0.76156765  0.          0.77131093  0.37542343]\n",
      " [ 0.49345911  0.01592223  0.38653135  0.          0.6952765 ]\n",
      " [ 0.          0.          0.          0.          0.66257358]\n",
      " [ 0.          0.34398788  2.53760481  0.          0.        ]]\n",
      "\n",
      "Input = the above [5, 5] array\n",
      "MaxPool, stride size = [1, 1], results in [4, 4] array\n",
      "[[ 0.76156765  1.51149166  1.51149166  1.78439188]\n",
      " [ 0.76156765  0.76156765  0.77131093  0.77131093]\n",
      " [ 0.49345911  0.38653135  0.38653135  0.6952765 ]\n",
      " [ 0.34398788  2.53760481  2.53760481  0.66257358]]\n",
      "\n",
      "Input = the above [4, 4] array\n",
      "Fully connected layer on all 4 rows results in 5 outputs:\n",
      "[ 0.08245847 -0.16351229 -0.55429065 -0.24322605 -0.99900764]\n"
     ]
    }
   ],
   "source": [
    "# Run graph\n",
    "# Initialize Variables\n",
    "init = tf.global_variables_initializer()\n",
    "sess.run(init)\n",
    "\n",
    "feed_dict = {x_input_2d: data_2d}\n",
    "\n",
    "print('>>>> 2D Data <<<<')\n",
    "\n",
    "# Convolution Output\n",
    "print('Input = %s array' % (x_input_2d.shape.as_list()))\n",
    "print('%s Convolution, stride size = [%d, %d] , results in the %s array' % \n",
    "      (my_filter.get_shape().as_list()[:2],conv_stride_size,conv_stride_size,my_convolution_output.shape.as_list()))\n",
    "print(sess.run(my_convolution_output, feed_dict=feed_dict))\n",
    "\n",
    "# Activation Output\n",
    "print('\\nInput = the above %s array' % (my_convolution_output.shape.as_list()))\n",
    "print('ReLU element wise returns the %s array' % (my_activation_output.shape.as_list()))\n",
    "print(sess.run(my_activation_output, feed_dict=feed_dict))\n",
    "\n",
    "# Max Pool Output\n",
    "print('\\nInput = the above %s array' % (my_activation_output.shape.as_list()))\n",
    "print('MaxPool, stride size = [%d, %d], results in %s array' % \n",
    "      (maxpool_stride_size,maxpool_stride_size,my_maxpool_output.shape.as_list()))\n",
    "print(sess.run(my_maxpool_output, feed_dict=feed_dict))\n",
    "\n",
    "# Fully Connected Output\n",
    "print('\\nInput = the above %s array' % (my_maxpool_output.shape.as_list()))\n",
    "print('Fully connected layer on all %d rows results in %s outputs:' % \n",
    "      (my_maxpool_output.shape.as_list()[0],my_full_output.shape.as_list()[0]))\n",
    "print(sess.run(my_full_output, feed_dict=feed_dict))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [conda env:tf-cpu]",
   "language": "python",
   "name": "conda-env-tf-cpu-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
