{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Implementation of Accurate Binary Convolution Layer\n",
    "The main notebook is **ABC.ipynb**. In this notebook, *alphas* training is moved out of the layer, so that the variables and functions can be made reusable for inference time."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from __future__ import division, print_function\n",
    "import tensorflow as tf\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### See *ABC* notebook for explanation of all the functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def get_mean_stddev(input_tensor):\n",
    "    with tf.name_scope('mean_stddev_cal'):\n",
    "        mean, variance = tf.nn.moments(input_tensor, axes=range(len(input_tensor.get_shape())))\n",
    "        stddev = tf.sqrt(variance, name=\"standard_deviation\")\n",
    "        return mean, stddev\n",
    "    \n",
    "# TODO: Allow shift parameters to be learnable\n",
    "def get_shifted_stddev(stddev, no_filters):\n",
    "    with tf.name_scope('shifted_stddev'):\n",
    "        spreaded_deviation = -1. + (2./(no_filters - 1)) * tf.convert_to_tensor(range(no_filters),\n",
    "                                                                                dtype=tf.float32)\n",
    "        return spreaded_deviation * stddev\n",
    "    \n",
     "def get_binary_filters(convolution_filters, no_filters, name=None):\n",
     "    \"\"\"Binarize `convolution_filters` at `no_filters` mean/stddev-shifted thresholds.\n",
     "\n",
     "    Returns a tensor of shape [no_filters] + filters_shape. Entries are +/-1 from\n",
     "    tf.sign (an exact zero maps to 0 -- assumed negligible for real-valued filters).\n",
     "    \"\"\"\n",
     "    with tf.name_scope(name, default_name=\"get_binary_filters\"):\n",
     "        mean, stddev = get_mean_stddev(convolution_filters)\n",
     "        shifted_stddev = get_shifted_stddev(stddev, no_filters)\n",
     "        \n",
     "        # Normalize the filters by subtracting mean from them\n",
     "        mean_adjusted_filters = convolution_filters - mean\n",
     "        \n",
     "        # Tiling filters to match the number of filters\n",
     "        expanded_filters = tf.expand_dims(mean_adjusted_filters, axis=0, name=\"expanded_filters\")\n",
     "        tiled_filters = tf.tile(expanded_filters, [no_filters] + [1] * len(convolution_filters.get_shape()),\n",
     "                                name=\"tiled_filters\")\n",
     "        \n",
     "        # Similarly tiling spreaded stddev to match the shape of tiled_filters\n",
     "        # (reshape to [no_filters, 1, 1, ...] so it broadcasts against tiled_filters)\n",
     "        expanded_stddev = tf.reshape(shifted_stddev, [no_filters] + [1] * len(convolution_filters.get_shape()),\n",
     "                                     name=\"expanded_stddev\")\n",
     "        \n",
     "        # Each of the no_filters slices is thresholded at a different shifted stddev\n",
     "        binarized_filters = tf.sign(tiled_filters + expanded_stddev, name=\"binarized_filters\")\n",
     "        return binarized_filters"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now, instead of get_alphas, implementation of **alpha training** is provided, which takes input of the *filters*, *binarized filters*, and *alphas* and returns the loss and the alpha training operation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def alpha_training(convolution_filters, binary_filters, alphas, no_filters):\n",
     "    \"\"\"Build ops that fit `alphas` so the alpha-weighted sum of binary filters\n",
     "    approximates the real-valued filters (least-squares fit).\n",
     "\n",
     "    Returns:\n",
     "        (training_op, loss): an Adam step over `alphas` only, and the scalar MSE loss.\n",
     "    \"\"\"\n",
     "    with tf.name_scope(\"alpha_training\"):\n",
     "        # Flatten everything so the fit is a simple 1-D least-squares problem\n",
     "        reshaped_convolution_filters = tf.reshape(convolution_filters, [-1], name=\"reshaped_convolution_filters\")\n",
     "        reshaped_binary_filters = tf.reshape(binary_filters, [no_filters, -1],\n",
     "                                             name=\"reshaped_binary_filters\")\n",
     "        \n",
     "        # alphas (shape (no_filters, 1)) broadcasts across the flattened filters\n",
     "        weighted_sum_filters = tf.reduce_sum(tf.multiply(alphas, reshaped_binary_filters),\n",
     "                                             axis=0, name=\"weighted_sum_filters\")\n",
     "        \n",
     "        # Defining loss\n",
     "        error = tf.square(reshaped_convolution_filters - weighted_sum_filters, name=\"alphas_error\")\n",
     "        loss = tf.reduce_mean(error, axis=0, name=\"alphas_loss\")\n",
     "        \n",
     "        # Defining optimizer; var_list restricts updates to the alphas only\n",
     "        training_op = tf.train.AdamOptimizer().minimize(loss, var_list=[alphas],\n",
     "                                                        name=\"alphas_training_op\")\n",
     "        \n",
     "        return training_op, loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Now, both *ABC* and *ApproxConv* are updated to incorporate this change"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def ApproxConv(no_filters, alphas, binary_filters, convolution_biases=None,\n",
     "               strides=(1, 1), padding=\"VALID\", name=None):\n",
     "    \"\"\"Return a closure applying the alpha-weighted sum of binary convolutions.\n",
     "\n",
     "    NOTE(review): the bias is added to each of the no_filters binary convolutions\n",
     "    before the alpha-weighted sum, so it is effectively scaled by sum(alphas) --\n",
     "    confirm this matches the main ABC notebook's intent.\n",
     "    \"\"\"\n",
     "    with tf.name_scope(name, \"ApproxConv\"):\n",
     "        if convolution_biases is None:\n",
     "            biases = 0.\n",
     "        else:\n",
     "            biases = convolution_biases\n",
     "        \n",
     "        # Defining function for closure to accept multiple inputs with same filters\n",
     "        def ApproxConvLayer(input_tensor, name=None):\n",
     "            with tf.name_scope(name, \"ApproxConv_Layer\"):\n",
     "                # Reshaping alphas to match the input tensor\n",
     "                reshaped_alphas = tf.reshape(alphas,\n",
     "                                             shape=[no_filters] + [1] * len(input_tensor.get_shape()),\n",
     "                                             name=\"reshaped_alphas\")\n",
     "                \n",
     "                # Calculating convolution for each binary filter\n",
     "                approxConv_outputs = []\n",
     "                for index in range(no_filters):\n",
     "                    # Binary convolution\n",
     "                    this_conv = tf.nn.conv2d(input_tensor, binary_filters[index],\n",
     "                                             strides=(1,) + strides + (1,),\n",
     "                                             padding=padding)\n",
     "                    approxConv_outputs.append(this_conv + biases)\n",
     "                conv_outputs = tf.convert_to_tensor(approxConv_outputs, dtype=tf.float32,\n",
     "                                                    name=\"conv_outputs\")\n",
     "                \n",
     "                # Summing up each of the binary convolution, weighted by its alpha\n",
     "                ApproxConv_output = tf.reduce_sum(tf.multiply(conv_outputs, reshaped_alphas), axis=0)\n",
     "                \n",
     "                return ApproxConv_output\n",
     "        \n",
     "        return ApproxConvLayer\n",
     "    \n",
     "def ABC(binary_filters, alphas, shift_parameters, betas, \n",
     "        convolution_biases=None, no_binary_filters=5, no_ApproxConvLayers=5,\n",
     "        strides=(1, 1), padding=\"VALID\", name=None):\n",
     "    \"\"\"Return an ABC layer closure: a beta-weighted sum of ApproxConv layers, each\n",
     "    applied to a shifted, binarized copy of the input.\n",
     "\n",
     "    NOTE(review): the clip to [0, 1] assumes the layer input lies in that range\n",
     "    (e.g. MNIST pixels / post-ReLU activations) -- confirm for other uses.\n",
     "    \"\"\"\n",
     "    with tf.name_scope(name, \"ABC\"):        \n",
     "        # Instantiating the ApproxConv Layer (shared by all shifted inputs)\n",
     "        ApproxConvLayer= ApproxConv(no_binary_filters, alphas, binary_filters, convolution_biases,\n",
     "                                    strides, padding)\n",
     "        \n",
     "        def ABCLayer(input_tensor, name=None):\n",
     "            with tf.name_scope(name, \"ABCLayer\"):\n",
     "                # Reshaping betas to match the input tensor\n",
     "                reshaped_betas = tf.reshape(betas,\n",
     "                                            shape=[no_ApproxConvLayers] + [1] * len(input_tensor.get_shape()),\n",
     "                                            name=\"reshaped_betas\")\n",
     "                \n",
     "                # Calculating ApproxConv for each shifted input\n",
     "                ApproxConv_layers = []\n",
     "                for index in range(no_ApproxConvLayers):\n",
     "                    # Shifting and binarizing input\n",
     "                    shifted_input = tf.clip_by_value(input_tensor + shift_parameters[index], 0., 1.,\n",
     "                                                     name=\"shifted_input_\" + str(index))\n",
     "                    # tf.sign yields 0 where shifted_input == 0.5 exactly; assumed rare\n",
     "                    binarized_activation = tf.sign(shifted_input - 0.5)\n",
     "                    \n",
     "                    # Passing through the ApproxConv layer\n",
     "                    ApproxConv_layers.append(ApproxConvLayer(binarized_activation))\n",
     "                ApproxConv_output = tf.convert_to_tensor(ApproxConv_layers, dtype=tf.float32,\n",
     "                                                         name=\"ApproxConv_output\")\n",
     "                \n",
     "                # Taking the weighted sum using the betas\n",
     "                ABC_output = tf.reduce_sum(tf.multiply(ApproxConv_output, reshaped_betas), axis=0)\n",
     "                return ABC_output\n",
     "        \n",
     "        return ABCLayer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Now a layer can be created as follows"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Random data for smoke-testing the layer construction below\n",
     "# (filters follow tf.nn.conv2d's [h, w, in, out] layout; input is NHWC)\n",
     "test_filters = np.random.normal(size=(3, 3, 1, 64))\n",
     "test_biases = np.random.normal(size=(64,))\n",
     "test_input = np.random.normal(size=(32, 28, 28, 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Dedicated graph so the demo ops do not pollute the default graph\n",
     "g = tf.Graph()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "with g.as_default():\n",
     "    filters = tf.Variable(tf.convert_to_tensor(test_filters, dtype=tf.float32), name=\"convolution_filters\")\n",
     "    biases = tf.Variable(tf.convert_to_tensor(test_biases, dtype=tf.float32), name=\"convolution_biases\")\n",
     "    alphas = tf.Variable(tf.constant(1., shape=(5, 1)), dtype=tf.float32,\n",
     "                         name=\"alphas\")\n",
     "    shift_parameters = tf.Variable(tf.constant(0., shape=(5, 1)), dtype=tf.float32,\n",
     "                                   name=\"shift_parameters\")\n",
     "    betas = tf.Variable(tf.constant(1., shape=(5, 1)), dtype=tf.float32,\n",
     "                        name=\"betas\")\n",
     "    \n",
     "    binary_filters = get_binary_filters(filters, 5)\n",
     "    # stop_gradient: the alpha fit must not backpropagate into the filters\n",
     "    alphas_training_op, alphas_loss = alpha_training(tf.stop_gradient(filters),\n",
     "                                                     tf.stop_gradient(binary_filters),\n",
     "                                                     alphas, 5)\n",
     "    # stop_gradient(alphas): the main loss must not update alphas (they have their own op)\n",
     "    ABC_layer = ABC(binary_filters, tf.stop_gradient(alphas), shift_parameters, betas, biases)\n",
     "    \n",
     "    output = ABC_layer(tf.convert_to_tensor(test_input, dtype=tf.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Testing\n",
    "Let's test the updated architecture on MNIST again"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n",
      "Extracting /tmp/data/train-images-idx3-ubyte.gz\n",
      "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n",
      "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n",
      "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n",
      "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n",
      "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n",
      "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# MNIST data import\n",
     "# Importing data\n",
     "# NOTE(review): tensorflow.examples.tutorials.mnist is deprecated in later TF 1.x;\n",
     "# works here, but consider tf.keras.datasets.mnist if upgrading\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "!mkdir -p /tmp/data\n",
     "mnist = input_data.read_data_sets(\"/tmp/data/\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The following is exactly the same as in the other notebook *ABC*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Defining utils function\n",
    "def weight_variable(shape, name=\"weight\"):\n",
    "    initial = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(initial, name=name)\n",
    "\n",
    "def bias_variable(shape, name=\"bias\"):\n",
    "    initial = tf.constant(0.1, shape=shape)\n",
    "    return tf.Variable(initial, name=name)\n",
    "\n",
    "def conv2d(x, W):\n",
    "    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n",
    "\n",
    "def max_pool_2x2(x):\n",
    "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='SAME')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Creating the baseline (full-precision) graph used to pre-train the weights\n",
     "without_ABC_graph = tf.Graph()\n",
     "with without_ABC_graph.as_default():\n",
     "    # Defining inputs\n",
     "    x = tf.placeholder(dtype=tf.float32)\n",
     "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
     "    \n",
     "    # Convolution Layer 1\n",
     "    W_conv1 = weight_variable(shape=([5, 5, 1, 32]), name=\"W_conv1\")\n",
     "    b_conv1 = bias_variable(shape=[32], name=\"b_conv1\")\n",
     "    conv1 = (conv2d(x_image, W_conv1) + b_conv1)\n",
     "    pool1 = max_pool_2x2(conv1)\n",
     "    # NOTE(review): training flag left at its default -- confirm that the intended\n",
     "    # batch statistics are used here\n",
     "    bn_conv1 = tf.layers.batch_normalization(pool1, axis=-1, name=\"batchNorm1\")\n",
     "    h_conv1 = tf.nn.relu(bn_conv1)\n",
     "\n",
     "    # Convolution Layer 2\n",
     "    W_conv2 = weight_variable(shape=([5, 5, 32, 64]), name=\"W_conv2\")\n",
     "    b_conv2 = bias_variable(shape=[64], name=\"b_conv2\")\n",
     "    conv2 = (conv2d(h_conv1, W_conv2) + b_conv2)\n",
     "    pool2 = max_pool_2x2(conv2)\n",
     "    bn_conv2 = tf.layers.batch_normalization(pool2, axis=-1, name=\"batchNorm2\")\n",
     "    h_conv2 = tf.nn.relu(bn_conv2)\n",
     "\n",
     "    # Flat the conv2 output\n",
     "    h_conv2_flat = tf.reshape(h_conv2, shape=(-1, 7*7*64))\n",
     "\n",
     "    # Dense layer1\n",
     "    W_fc1 = weight_variable([7 * 7 * 64, 1024])\n",
     "    b_fc1 = bias_variable([1024])\n",
     "    h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)\n",
     "\n",
     "    # Dropout\n",
     "    keep_prob = tf.placeholder(tf.float32)\n",
     "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
     "\n",
     "    # Output layer\n",
     "    W_fc2 = weight_variable([1024, 10])\n",
     "    b_fc2 = bias_variable([10])\n",
     "\n",
     "    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
     "    \n",
     "    # Labels (integer class ids, converted to one-hot for the loss)\n",
     "    y = tf.placeholder(tf.int32, [None])\n",
     "    y_ = tf.one_hot(y, 10)\n",
     "    \n",
     "    # Defining optimizer and loss\n",
     "    cross_entropy = tf.reduce_mean(\n",
     "        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n",
     "    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n",
     "    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n",
     "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
     "    \n",
     "    # Initializer\n",
     "    graph_init = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Defining variables to save. These will be fed to our custom layer\n",
     "# (exported as numpy arrays at the end of the training session below)\n",
     "variables_to_save = {\"W_conv1\": W_conv1,\n",
     "                     \"b_conv1\": b_conv1,\n",
     "                     \"W_conv2\": W_conv2,\n",
     "                     \"b_conv2\": b_conv2,\n",
     "                     \"W_fc1\": W_fc1,\n",
     "                     \"b_fc1\": b_fc1,\n",
     "                     \"W_fc2\": W_fc2,\n",
     "                     \"b_fc2\": b_fc2}\n",
     "# var_name -> numpy value, filled once training has finished\n",
     "values = {}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 1  Val accuracy: 80.0000%  Loss: 0.575571\n",
      "Epoch: 2  Val accuracy: 88.0000%  Loss: 0.516295\n",
      "Epoch: 3  Val accuracy: 98.0000%  Loss: 0.074902\n",
      "Epoch: 4  Val accuracy: 96.0000%  Loss: 0.114960\n",
      "Epoch: 5  Val accuracy: 96.0000%  Loss: 0.108748        \n"
     ]
    }
   ],
   "source": [
    "n_epochs = 5\n",
    "batch_size = 32\n",
    "        \n",
    "with tf.Session(graph=without_ABC_graph) as sess:\n",
    "    sess.run(graph_init)\n",
    "    for epoch in range(n_epochs):\n",
    "        for iteration in range(1, 200 + 1):\n",
    "            batch = mnist.train.next_batch(50)\n",
    "            \n",
    "            # Run operation and calculate loss\n",
    "            _, loss_train = sess.run([train_step, cross_entropy],\n",
    "                                     feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})\n",
    "            print(\"\\rIteration: {}/{} ({:.1f}%)  Loss: {:.5f}\".format(\n",
    "                      iteration, 200,\n",
    "                      iteration * 100 / 200,\n",
    "                      loss_train),\n",
    "                  end=\"\")\n",
    "\n",
    "        # At the end of each epoch,\n",
    "        # measure the validation loss and accuracy:\n",
    "        loss_vals = []\n",
    "        acc_vals = []\n",
    "        for iteration in range(1, 200 + 1):\n",
    "            X_batch, y_batch = mnist.validation.next_batch(batch_size)\n",
    "            acc_val, loss_val = sess.run([accuracy, cross_entropy],\n",
    "                                     feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})\n",
    "            loss_vals.append(loss_val)\n",
    "            acc_vals.append(acc_val)\n",
    "            print(\"\\rEvaluating the model: {}/{} ({:.1f}%)\".format(iteration, 200,\n",
    "                iteration * 100 / 200),\n",
    "                  end=\" \" * 10)\n",
    "        loss_val = np.mean(loss_vals)\n",
    "        acc_val = np.mean(acc_vals)\n",
    "        print(\"\\rEpoch: {}  Val accuracy: {:.4f}%  Loss: {:.6f}\".format(\n",
    "            epoch + 1, acc_val * 100, loss_val))\n",
    "        \n",
    "    # On completion of training, save the variables to be fed to custom model\n",
    "    for var_name in variables_to_save:\n",
    "        values[var_name] = sess.run(variables_to_save[var_name])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The 100% accuracy is not an error. It occurs because the complete validation set is not being evaluated — only part of it is — and our model got all the answers right in that part."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Creating the custom model\n",
    "While creating the custom model, we will need to create all the variables ourself."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "First, let's create a function that returns the mean and variance required by the batch-norm layer. Batch normalization needs the mean and variance computed over every axis except the channels axis."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def bn_mean_variance(input_tensor, axis=-1, keep_dims=True):\n",
    "    shape = len(input_tensor.get_shape())\n",
    "    if axis < 0:\n",
    "        axis += shape\n",
    "    dimension_range = range(shape)\n",
    "    return tf.nn.moments(input_tensor, axes=dimension_range[:axis] + dimension_range[axis+1:],\n",
    "                         keep_dims=keep_dims)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "custom_graph = tf.Graph()\n",
     "with custom_graph.as_default():\n",
     "    # Collected so all alpha fits can be re-initialized and re-run together\n",
     "    alphas_training_operations = []\n",
     "    alphas_variables = []\n",
     "    \n",
     "    # Setting configuration\n",
     "    no_filters_conv1 = 5\n",
     "    no_layers_conv1 = 5\n",
     "    no_filters_conv2 = 5\n",
     "    no_layers_conv2 = 5\n",
     "    \n",
     "    # Inputs\n",
     "    x = tf.placeholder(dtype=tf.float32)\n",
     "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
     "    \n",
     "    # Convolution Layer 1 (weights initialized from the pre-trained float network)\n",
     "    W_conv1 = tf.Variable(values[\"W_conv1\"], name=\"W_conv1\")\n",
     "    b_conv1 = tf.Variable(values[\"b_conv1\"], name=\"b_conv1\")\n",
     "    # Creating new variables\n",
     "    alphas_conv1 = tf.Variable(tf.random_normal(shape=(no_filters_conv1, 1), mean=1.0, stddev=0.1),\n",
     "                               dtype=tf.float32, name=\"alphas_conv1\")\n",
     "    shift_parameters_conv1 = tf.Variable(tf.constant(0., shape=(no_layers_conv1, 1)),\n",
     "                                         dtype=tf.float32, name=\"shift_parameters_conv1\")\n",
     "    betas_conv1 = tf.Variable(tf.constant(1., shape=(no_layers_conv1, 1)),\n",
     "                              dtype=tf.float32, name=\"betas_conv1\")\n",
     "    # Performing the operations\n",
     "    binary_filters_conv1 = get_binary_filters(W_conv1, no_filters_conv1)\n",
     "    alpha_training_conv1, alpha_loss_conv1 = alpha_training(tf.stop_gradient(W_conv1, \"no_gradient_W_conv1\"),\n",
     "                                                            tf.stop_gradient(binary_filters_conv1,\n",
     "                                                                             \"no_gradient_binary_filters_conv1\"),\n",
     "                                                            alphas_conv1, no_filters_conv1)\n",
     "    conv1 = ABC(binary_filters_conv1, tf.stop_gradient(alphas_conv1), shift_parameters_conv1,\n",
     "                betas_conv1, b_conv1, padding=\"SAME\")(x_image)\n",
     "    # Saving the alphas training operation and the variable\n",
     "    alphas_training_operations.append(alpha_training_conv1)\n",
     "    alphas_variables.append(alphas_conv1)\n",
     "    \n",
     "    # Other layers\n",
     "    pool1 = max_pool_2x2(conv1)\n",
     "    # BatchNorm \n",
     "    # NOTE(review): uses per-batch statistics (no moving averages), so eval\n",
     "    # results depend on the evaluation batch -- confirm this is intended\n",
     "    mean_conv1, variance_conv1 = bn_mean_variance(pool1)\n",
     "    bn_gamma_conv1 = tf.Variable(tf.ones(shape=(32,), dtype=tf.float32), name=\"bn_gamma_conv1\")\n",
     "    bn_beta_conv1 = tf.Variable(tf.zeros(shape=(32,), dtype=tf.float32), name=\"bn_beta_conv1\")\n",
     "    bn_conv1 = tf.nn.batch_normalization(pool1, mean_conv1, variance_conv1,\n",
     "                                         bn_beta_conv1, bn_gamma_conv1, 0.001)\n",
     "    h_conv1 = tf.nn.relu(bn_conv1)\n",
     "\n",
     "    # Convolution Layer 2\n",
     "    W_conv2 = tf.Variable(values[\"W_conv2\"], name=\"W_conv2\")\n",
     "    b_conv2 = tf.Variable(values[\"b_conv2\"], name=\"b_conv2\")\n",
     "    \n",
     "    # Creating new variables\n",
     "    alphas_conv2 = tf.Variable(tf.random_normal(shape=(no_filters_conv2, 1), mean=1.0, stddev=0.1),\n",
     "                               dtype=tf.float32, name=\"alphas_conv2\")\n",
     "    shift_parameters_conv2 = tf.Variable(tf.constant(0., shape=(no_layers_conv2, 1)),\n",
     "                                         dtype=tf.float32, name=\"shift_parameters_conv2\")\n",
     "    betas_conv2 = tf.Variable(tf.constant(1., shape=(no_layers_conv2, 1)),\n",
     "                              dtype=tf.float32, name=\"betas_conv2\")\n",
     "    \n",
     "    # Performing the operations\n",
     "    binary_filters_conv2 = get_binary_filters(W_conv2, no_filters_conv2)\n",
     "    alpha_training_conv2, alpha_loss_conv2 = alpha_training(tf.stop_gradient(W_conv2, \"no_gradient_W_conv2\"),\n",
     "                                                            tf.stop_gradient(binary_filters_conv2,\n",
     "                                                                             \"no_gradient_binary_filters_conv2\"),\n",
     "                                                            alphas_conv2, no_filters_conv2)\n",
     "    conv2 = ABC(binary_filters_conv2, tf.stop_gradient(alphas_conv2), shift_parameters_conv2,\n",
     "                betas_conv2, b_conv2, padding=\"SAME\")(h_conv1)\n",
     "    \n",
     "    # Saving the alphas training operation and the variable\n",
     "    alphas_training_operations.append(alpha_training_conv2)\n",
     "    alphas_variables.append(alphas_conv2)\n",
     "    \n",
     "    # Other layers\n",
     "    pool2 = max_pool_2x2(conv2)\n",
     "    # BatchNorm\n",
     "    mean_conv2, variance_conv2 = bn_mean_variance(pool2)\n",
     "    bn_gamma_conv2 = tf.Variable(tf.ones(shape=(64,), dtype=tf.float32), name=\"bn_gamma_conv2\")\n",
     "    bn_beta_conv2 = tf.Variable(tf.zeros(shape=(64,), dtype=tf.float32), name=\"bn_beta_conv2\")\n",
     "    bn_conv2 = tf.nn.batch_normalization(pool2, mean_conv2, variance_conv2,\n",
     "                                         bn_beta_conv2, bn_gamma_conv2, 0.001)\n",
     "    h_conv2 = tf.nn.relu(bn_conv2)\n",
     "\n",
     "    # Flat the conv2 output\n",
     "    h_conv2_flat = tf.reshape(h_conv2, shape=(-1, 7*7*64))\n",
     "\n",
     "    # Dense layer1 (kept as constants: only the conv layers are binarized here)\n",
     "    W_fc1 = tf.convert_to_tensor(values[\"W_fc1\"], dtype=tf.float32)\n",
     "    b_fc1 = tf.convert_to_tensor(values[\"b_fc1\"], dtype=tf.float32)\n",
     "    h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)\n",
     "\n",
     "    # Dropout\n",
     "    keep_prob = tf.placeholder(tf.float32)\n",
     "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
     "\n",
     "    # Output layer\n",
     "    W_fc2 = tf.convert_to_tensor(values[\"W_fc2\"], dtype=tf.float32)\n",
     "    b_fc2 = tf.convert_to_tensor(values[\"b_fc2\"], dtype=tf.float32)\n",
     "    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
     "    \n",
     "    # Labels\n",
     "    y = tf.placeholder(tf.int32, [None])\n",
     "    y_ = tf.one_hot(y, 10)\n",
     "    \n",
     "    # Defining optimizer and loss\n",
     "    cross_entropy = tf.reduce_mean(\n",
     "        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n",
     "    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n",
     "    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n",
     "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
     "    \n",
     "    graph_init = tf.global_variables_initializer()\n",
     "    alphas_init = tf.variables_initializer(alphas_variables)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's create the dictionary of variables to save"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Defining variables to save. These will be fed to our custom layer\n",
     "# (includes the ABC-specific parameters needed to rebuild the layer at inference)\n",
     "variables_to_save = {\"W_conv1\": W_conv1,\n",
     "                     \"b_conv1\": b_conv1,\n",
     "                     \"alphas_conv1\": alphas_conv1,\n",
     "                     \"betas_conv1\": betas_conv1,\n",
     "                     \"shift_parameters_conv1\": shift_parameters_conv1,\n",
     "                     \"bn_gamma_conv1\": bn_gamma_conv1,\n",
     "                     \"bn_beta_conv1\": bn_beta_conv1,\n",
     "                     \"W_conv2\": W_conv2,\n",
     "                     \"b_conv2\": b_conv2,\n",
     "                     \"alphas_conv2\": alphas_conv2,\n",
     "                     \"betas_conv2\": betas_conv2,\n",
     "                     \"shift_parameters_conv2\": shift_parameters_conv2,\n",
     "                     \"bn_gamma_conv2\": bn_gamma_conv2,\n",
     "                     \"bn_beta_conv2\": bn_beta_conv2,\n",
     "                     \"W_fc1\": W_fc1,\n",
     "                     \"b_fc1\": b_fc1,\n",
     "                     \"W_fc2\": W_fc2,\n",
     "                     \"b_fc2\": b_fc2}\n",
     "# var_name -> numpy value, filled once training has finished\n",
     "values = {}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 1  Val accuracy: 90.0000%  Loss: 0.314954\n",
      "Epoch: 2  Val accuracy: 76.0000%  Loss: 0.954873\n",
      "Epoch: 3  Val accuracy: 80.0000%  Loss: 0.985948\n",
      "Epoch: 4  Val accuracy: 84.0000%  Loss: 1.012544\n",
      "Epoch: 5  Val accuracy: 78.0000%  Loss: 1.004487\n",
      "CPU times: user 4min 42s, sys: 26.4 s, total: 5min 8s\n",
      "Wall time: 5min 6s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "n_epochs = 5\n",
    "batch_size = 32\n",
    "alpha_training_epochs = 200\n",
    "        \n",
    "with tf.Session(graph=custom_graph) as sess:\n",
    "    sess.run(graph_init)\n",
    "    for epoch in range(n_epochs):\n",
    "        for iteration in range(1, 200 + 1):\n",
    "            # Training alphas\n",
    "            sess.run(alphas_init)\n",
    "            for alpha_training_op in alphas_training_operations:\n",
    "                for alpha_epoch in range(alpha_training_epochs):\n",
    "                    sess.run(alpha_training_op)\n",
    "            \n",
    "            batch = mnist.train.next_batch(50)\n",
    "            \n",
    "            # Run operation and calculate loss\n",
    "            _, loss_train = sess.run([train_step, cross_entropy],\n",
    "                                     feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})\n",
    "            print(\"\\rIteration: {}/{} ({:.1f}%)  Loss: {:.5f}\".format(\n",
    "                      iteration, 200,\n",
    "                      iteration * 100 / 200,\n",
    "                      loss_train),\n",
    "                  end=\"\")\n",
    "\n",
    "        # At the end of each epoch,\n",
    "        # measure the validation loss and accuracy:\n",
    "        \n",
    "        # Training alphas\n",
    "        sess.run(alphas_init)\n",
    "        for alpha_training_op in alphas_training_operations:\n",
    "            for alpha_epoch in range(alpha_training_epochs):\n",
    "                sess.run(alpha_training_op)\n",
    "                    \n",
    "        loss_vals = []\n",
    "        acc_vals = []\n",
    "        for iteration in range(1, 200 + 1):            \n",
    "            X_batch, y_batch = mnist.validation.next_batch(batch_size)\n",
    "            acc_val, loss_val = sess.run([accuracy, cross_entropy],\n",
    "                                     feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})\n",
    "            loss_vals.append(loss_val)\n",
    "            acc_vals.append(acc_val)\n",
    "            print(\"\\rEvaluating the model: {}/{} ({:.1f}%)\".format(iteration, 200,\n",
    "                iteration * 100 / 200),\n",
    "                  end=\" \" * 10)\n",
    "        loss_val = np.mean(loss_vals)\n",
    "        acc_val = np.mean(acc_vals)\n",
    "        print(\"\\rEpoch: {}  Val accuracy: {:.4f}%  Loss: {:.6f}\".format(\n",
    "            epoch + 1, acc_val * 100, loss_val))\n",
    "        \n",
    "    # On completion of training, save the variables to be fed to custom model\n",
    "    for var_name in variables_to_save:\n",
    "        values[var_name] = sess.run(variables_to_save[var_name])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Now, only the required variables can be saved for inference time. Using the **W_conv1** and **W_conv2**, values for binary filters and alphas can be calculated and those can be used along with **shift_parameters** and **betas** to create ABC layer for inference"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Pure inference testing\n",
    "OK! Let's extract the binary filters and alphas and throw away the weights and test our network. This will ensure that we do not have any bug in the implementation of the ABC layer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Creating graphs for alphas calculation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "alpha1_cal_graph = tf.Graph()\n",
    "with alpha1_cal_graph.as_default():\n",
    "    alphas1 = tf.Variable(tf.random_normal(shape=(no_filters_conv1, 1), mean=1.0, stddev=0.1))\n",
    "    conv_filters1 = tf.placeholder(dtype=tf.float32, shape=(5, 5, 1, 32))\n",
    "    bin_filters1 = get_binary_filters(convolution_filters=conv_filters1,\n",
    "                                     no_filters=no_filters_conv1)\n",
    "    alpha_training_op1, alpha_training_loss1 = alpha_training(conv_filters1, bin_filters1,\n",
    "                                                            alphas1, no_filters_conv1)\n",
    "    al_init1 = tf.global_variables_initializer()\n",
    "    \n",
    "alpha2_cal_graph = tf.Graph()\n",
    "with alpha2_cal_graph.as_default():\n",
    "    alphas2 = tf.Variable(tf.random_normal(shape=(no_filters_conv2, 1), mean=1.0, stddev=0.1))\n",
    "    conv_filters2 = tf.placeholder(dtype=tf.float32, shape=(5, 5, 32, 64))\n",
    "    bin_filters2 = get_binary_filters(convolution_filters=conv_filters2,\n",
    "                                     no_filters=no_filters_conv2)\n",
    "    alpha_training_op2, alpha_training_loss2 = alpha_training(conv_filters2, bin_filters2,\n",
    "                                                            alphas2, no_filters_conv2)\n",
    "    al_init2 = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Calculating alphas and binary filters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with tf.Session(graph=alpha1_cal_graph) as sess:\n",
    "    al_init1.run()\n",
    "    for epoch in range(200):\n",
    "        sess.run(alpha_training_op1, feed_dict={conv_filters1: values[\"W_conv1\"]})\n",
    "    cal_bin_filters, cal_alphas = sess.run([bin_filters1, alphas1], feed_dict={conv_filters1: values[\"W_conv1\"]})\n",
    "    values[\"binary_filters_conv1\"] = cal_bin_filters\n",
    "    values[\"alphas_conv1\"] = cal_alphas\n",
    "\n",
    "with tf.Session(graph=alpha2_cal_graph) as sess:\n",
    "    al_init2.run()\n",
    "    for epoch in range(200):\n",
    "        sess.run(alpha_training_op2, feed_dict={conv_filters2: values[\"W_conv2\"]})\n",
    "    cal_bin_filters, cal_alphas = sess.run([bin_filters2, alphas2], feed_dict={conv_filters2: values[\"W_conv2\"]})\n",
    "    values[\"binary_filters_conv2\"] = cal_bin_filters\n",
    "    values[\"alphas_conv2\"] = cal_alphas"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Building inference model\n",
    "Now, we have all our variables, let's build an inference model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "inference_graph = tf.Graph()\n",
    "with inference_graph.as_default():\n",
    "    # Setting configuration\n",
    "    no_filters_conv1 = 5\n",
    "    no_layers_conv1 = 5\n",
    "    no_filters_conv2 = 5\n",
    "    no_layers_conv2 = 5\n",
    "    \n",
    "    # Inputs\n",
    "    x = tf.placeholder(dtype=tf.float32)\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "    \n",
    "    # Convolution Layer 1\n",
    "    b_conv1 = tf.convert_to_tensor(values[\"b_conv1\"], dtype=tf.float32, name=\"b_conv1\")\n",
    "    alphas_conv1 = tf.convert_to_tensor(values[\"alphas_conv1\"],\n",
    "                                        dtype=tf.float32, name=\"alphas_conv1\")\n",
    "    shift_parameters_conv1 = tf.convert_to_tensor(values[\"shift_parameters_conv1\"],\n",
    "                                                  dtype=tf.float32, name=\"shift_parameters_conv1\")\n",
    "    betas_conv1 = tf.convert_to_tensor(values[\"betas_conv1\"],\n",
    "                                       dtype=tf.float32, name=\"betas_conv1\")\n",
    "    # Performing the operations\n",
    "    binary_filters_conv1 = tf.convert_to_tensor(values[\"binary_filters_conv1\"], dtype=tf.float32,\n",
    "                                                name=\"binary_filters_conv1\")\n",
    "    conv1 = ABC(binary_filters_conv1, tf.stop_gradient(alphas_conv1), shift_parameters_conv1,\n",
    "                betas_conv1, b_conv1, padding=\"SAME\")(x_image)\n",
    "    # Other layers\n",
    "    pool1 = max_pool_2x2(conv1)\n",
    "    # batch norm parameters\n",
    "    mean_conv1, variance_conv1 = bn_mean_variance(pool1)\n",
    "    bn_gamma_conv1 = tf.convert_to_tensor(values[\"bn_gamma_conv1\"], dtype=tf.float32,\n",
    "                                          name=\"bn_gamma_conv1\")\n",
    "    bn_beta_conv1 = tf.convert_to_tensor(values[\"bn_beta_conv1\"], dtype=tf.float32,\n",
    "                                         name=\"bn_beta_conv1\")\n",
    "    bn_conv1 = tf.nn.batch_normalization(pool1, mean_conv1, variance_conv1,\n",
    "                                         bn_beta_conv1, bn_gamma_conv1, 0.001)\n",
    "    h_conv1 = tf.nn.relu(bn_conv1)\n",
    "\n",
    "    # Convolution Layer 2\n",
    "    b_conv2 = tf.convert_to_tensor(values[\"b_conv2\"], dtype=tf.float32, name=\"b_conv2\")\n",
    "    alphas_conv2 = tf.convert_to_tensor(values[\"alphas_conv2\"],\n",
    "                                        dtype=tf.float32, name=\"alphas_conv2\")\n",
    "    shift_parameters_conv2 = tf.convert_to_tensor(values[\"shift_parameters_conv2\"],\n",
    "                                                  dtype=tf.float32, name=\"shift_parameters_conv2\")\n",
    "    betas_conv2 = tf.convert_to_tensor(values[\"betas_conv2\"],\n",
    "                                       dtype=tf.float32, name=\"betas_conv2\")\n",
    "    # Performing the operations\n",
    "    binary_filters_conv2 = tf.convert_to_tensor(values[\"binary_filters_conv2\"], dtype=tf.float32,\n",
    "                                                name=\"binary_filters_conv2\")\n",
    "    conv2 = ABC(binary_filters_conv2, tf.stop_gradient(alphas_conv2), shift_parameters_conv2,\n",
    "                betas_conv2, b_conv2, padding=\"SAME\")(h_conv1)\n",
    "    # Other layers\n",
    "    pool2 = max_pool_2x2(conv2)\n",
    "    # batch norm parameters\n",
    "    mean_conv2, variance_conv2 = bn_mean_variance(pool2)\n",
    "    bn_gamma_conv2 = tf.convert_to_tensor(values[\"bn_gamma_conv2\"], dtype=tf.float32,\n",
    "                                          name=\"bn_gamma_conv2\")\n",
    "    bn_beta_conv2 = tf.convert_to_tensor(values[\"bn_beta_conv2\"], dtype=tf.float32,\n",
    "                                         name=\"bn_beta_conv2\")\n",
    "    bn_conv2 = tf.nn.batch_normalization(pool2, mean_conv2, variance_conv2,\n",
    "                                         bn_beta_conv2, bn_gamma_conv2, 0.001)\n",
    "    h_conv2 = tf.nn.relu(bn_conv2)\n",
    "\n",
    "    # Flat the conv2 output\n",
    "    h_conv2_flat = tf.reshape(h_conv2, shape=(-1, 7*7*64))\n",
    "\n",
    "    # Dense layer1\n",
    "    W_fc1 = tf.convert_to_tensor(values[\"W_fc1\"], dtype=tf.float32)\n",
    "    b_fc1 = tf.convert_to_tensor(values[\"b_fc1\"], dtype=tf.float32)\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "    # Dropout\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "    # Output layer\n",
    "    W_fc2 = tf.convert_to_tensor(values[\"W_fc2\"], dtype=tf.float32)\n",
    "    b_fc2 = tf.convert_to_tensor(values[\"b_fc2\"], dtype=tf.float32)\n",
    "    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "    \n",
    "    # Labels\n",
    "    y = tf.placeholder(tf.int32, [None])\n",
    "    y_ = tf.one_hot(y, 10)\n",
    "    \n",
    "    # Defining optimizer and loss\n",
    "    cross_entropy = tf.reduce_mean(\n",
    "        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n",
    "    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's test the inference model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 200  Val accuracy: 78.0000%  Loss: 0.884985\n",
      "CPU times: user 6.03 s, sys: 832 ms, total: 6.86 s\n",
      "Wall time: 5.95 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "with tf.Session(graph=inference_graph) as sess:\n",
    "    loss_vals = []\n",
    "    acc_vals = []\n",
    "    for iteration in range(1, 500 + 1):            \n",
    "        X_batch, y_batch = mnist.validation.next_batch(batch_size)\n",
    "        acc_val, loss_val = sess.run([accuracy, cross_entropy],\n",
    "                                 feed_dict={x: X_batch, y: y_batch, keep_prob: 1.0})\n",
    "        loss_vals.append(loss_val)\n",
    "        acc_vals.append(acc_val)\n",
    "        print(\"\\rEvaluating the model: {}/{} ({:.1f}%)\".format(iteration, 500,\n",
    "            iteration * 100 / 500),\n",
    "              end=\" \" * 10)\n",
    "    loss_val = np.mean(loss_vals)\n",
    "    acc_val = np.mean(acc_vals)\n",
    "    print(\"\\rVal accuracy: {:.4f}%  Loss: {:.6f}\".format(\n",
    "        acc_val * 100, loss_val))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tensorflow",
   "language": "python",
   "name": "tensorflow"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
