{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "mnist_sparsity.ipynb",
      "version": "0.3.2",
      "views": {},
      "default_view": {},
      "provenance": [
        {
          "file_id": "1S0zGJ9Mfqros6Mjcre_PLN3f2jq_L0VE",
          "timestamp": 1518035508377
        },
        {
          "file_id": "1iEfLE4FmkiVQ8nx4jUYr6KAuxKTCp5UT",
          "timestamp": 1516826348243
        },
        {
          "file_id": "1IHNqHpSS3WQdTnIAOhWcN0wTjxpRayGT",
          "timestamp": 1516746341574
        },
        {
          "file_id": "17C-t9NvJevuqTAQUEkCpqM50UdCQVIpG",
          "timestamp": 1516601523439
        }
      ],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "metadata": {
        "id": "3y8PjUX1-hrz",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### *Import necessary packages*\n",
        "\n",
        "---\n",
        "\n"
      ]
    },
    {
      "metadata": {
        "id": "rp0URWpjJ3mn",
        "colab_type": "code",
        "colab": {
          "autoexec": {
            "startup": false,
            "wait_interval": 0
          }
        }
      },
      "cell_type": "code",
      "source": [
        "# BUGFIX: __future__ imports must be the very first statements in the\n",
        "# cell, otherwise Python raises a SyntaxError at compile time.\n",
        "from __future__ import absolute_import\n",
        "from __future__ import division\n",
        "from __future__ import print_function\n",
        "\n",
        "# Standard library\n",
        "import argparse\n",
        "import functools\n",
        "import os\n",
        "import sys\n",
        "\n",
        "# Third-party\n",
        "import tensorflow as tf\n",
        "from apiclient import errors\n",
        "from apiclient.http import MediaFileUpload\n",
        "from tensorflow.examples.tutorials.mnist import input_data\n",
        "\n",
        "# Command-line flags; populated by argparse in the last cell.\n",
        "FLAGS = None\n",
        "# from IPython.core.debugger import set_trace"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "URABzX60_cKy",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### *Defining necessary functions for sparsity operations*\n",
        "\n",
        "---\n",
        "\n"
      ]
    },
    {
      "metadata": {
        "id": "5XpL0u7v-RmF",
        "colab_type": "code",
        "colab": {
          "autoexec": {
            "startup": false,
            "wait_interval": 0
          }
        }
      },
      "cell_type": "code",
      "source": [
        "def sparse_fn(weights,threshold=0.001):\n",
        "    # Force weights with magnitude below `threshold` to exactly zero.\n",
        "    # abs(W) - threshold clipped at 0 maps [-threshold, threshold] to 0;\n",
        "    # sign * sign(clipped) * (clipped + threshold) restores the sign and\n",
        "    # original magnitude of the surviving weights.\n",
        "    W_signed = tf.sign(weights)\n",
        "    W_sparse_temp = tf.clip_by_value(tf.subtract(tf.abs(weights),threshold),\\\n",
        "                                     clip_value_min=0,clip_value_max=10000.0)\n",
        "    W_sparse_signed = tf.sign(W_sparse_temp)\n",
        "    W_sparse = W_signed * W_sparse_signed * tf.add(W_sparse_temp,threshold)\n",
        "    return W_sparse\n",
        "  \n",
        "def group_lasso(v):\n",
        "    # Group-lasso (L2,1) loss: sum of the L2 norms of each input-channel\n",
        "    # group, encouraging entire channels to shrink to zero together.\n",
        "    group_loss_all = []\n",
        "    for W in v:\n",
        "        if 'bias' not in W.name:\n",
        "            if 'conv' in W.name:\n",
        "                # Input-channel-wise sparsity\n",
        "                grouped_sum = tf.sqrt(tf.reduce_sum(tf.pow(W,2),axis=[0,1,2]))\n",
        "                group_loss = tf.reduce_sum(grouped_sum)\n",
        "                group_loss_all.append(group_loss)\n",
        "            if 'fc' in W.name:\n",
        "                # Input-channel-wise sparsity\n",
        "                grouped_sum = tf.sqrt(tf.reduce_sum(tf.pow(W,2),axis=[0]))\n",
        "                group_loss = tf.reduce_sum(grouped_sum)\n",
        "                group_loss_all.append(group_loss)\n",
        "\n",
        "    return tf.reduce_sum(group_loss_all)\n",
        "     \n",
        "    \n",
        "def sparsity_calculatior(v):  # NOTE: typo'd name kept -- train() calls it.\n",
        "  \n",
        "    # Calculate (mean layer sparsity, total surviving parameter count)\n",
        "    # over all conv/fc weight matrices, using FLAGS.sparsity_threshold.\n",
        "    sparsity_layers = []\n",
        "    num_params_layers = []\n",
        "    for W in v:\n",
        "          if 'bias' not in W.name:\n",
        "              # BUGFIX: original condition was `'conv' or 'fc' in W.name`,\n",
        "              # which is always True ('conv' is truthy). Test each substring.\n",
        "              if 'conv' in W.name or 'fc' in W.name:\n",
        "\n",
        "                  # Set threshold and clip\n",
        "                  W_sparse = tf.clip_by_value(tf.subtract(tf.abs(W), FLAGS.sparsity_threshold),\\\n",
        "                                              clip_value_min=0, clip_value_max=10000)\n",
        "\n",
        "                  # Sparsity calculation\n",
        "                  num_nonzero = tf.cast(tf.count_nonzero(W_sparse),tf.float32)\n",
        "                  num_weights = functools.reduce(lambda x, y: x*y, W_sparse.get_shape())\n",
        "                  non_sparsity_level = tf.divide(num_nonzero,tf.cast(num_weights,tf.float32))\n",
        "                  Sparsity = tf.subtract(1.0,non_sparsity_level)\n",
        "\n",
        "                  # Add the sparsity of each layer to the list\n",
        "                  sparsity_layers.append(Sparsity)\n",
        "\n",
        "                  # Add the number of parameters for each layer\n",
        "                  num_params_layers.append(num_nonzero)\n",
        "\n",
        "\n",
        "    return tf.reduce_mean(sparsity_layers), tf.reduce_sum(num_params_layers)\n",
        "                \n",
        "      \n",
        "def group_variance(v):\n",
        "    # Group variance function for attention-based sparsity: penalize the\n",
        "    # inverse variance of the squared group magnitudes, pushing groups\n",
        "    # apart (some large, some near zero).\n",
        "    group_loss_variance = []\n",
        "    for W in v:\n",
        "        if 'bias' not in W.name:\n",
        "            if 'conv' in W.name:\n",
        "                grouped_elements = tf.reduce_sum(tf.pow(W,2),axis=[0,1,2])\n",
        "                coefficient=1.0\n",
        "                group_mean, group_variance = tf.nn.moments(grouped_elements, axes=[0])\n",
        "                variance_loss = tf.divide(1.0, tf.divide(group_variance, coefficient))\n",
        "                group_loss_variance.append(variance_loss)\n",
        "            if 'fc' in W.name:\n",
        "                grouped_elements = tf.reduce_sum(tf.pow(W,2),axis=[0])\n",
        "                coefficient=1.0\n",
        "                group_mean, group_variance = tf.nn.moments(grouped_elements, axes=[0])\n",
        "                variance_loss = tf.divide(1.0, tf.divide(group_variance, coefficient))\n",
        "                group_loss_variance.append(variance_loss)\n",
        "\n",
        "    return tf.reduce_sum(group_loss_variance)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "39VBweD7_ovp",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### *Main training function* - Including Network Architecture + functions for creating network model and weights\n",
        "\n",
        "---\n",
        "\n"
      ]
    },
    {
      "metadata": {
        "id": "iiHHYXgbK5eu",
        "colab_type": "code",
        "colab": {
          "autoexec": {
            "startup": false,
            "wait_interval": 0
          }
        }
      },
      "cell_type": "code",
      "source": [
        "def train():\n",
        "    \"\"\"Build the sparsity-regularized MNIST CNN, train it and log summaries.\"\"\"\n",
        "  \n",
        "    # Import MNIST data\n",
        "    mnist = input_data.read_data_sets(FLAGS.data_dir,\n",
        "                                      fake_data=FLAGS.fake_data)\n",
        "\n",
        "    sess = tf.InteractiveSession()\n",
        "    # Create a multilayer model.\n",
        "\n",
        "    # Input placeholders: flattened 28x28 images and integer class labels.\n",
        "    with tf.name_scope('input'):\n",
        "        x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n",
        "        y_ = tf.placeholder(tf.int64, [None], name='y-input')\n",
        "\n",
        "    with tf.name_scope('input_reshape'):\n",
        "        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n",
        "        tf.summary.image('input', image_shaped_input, 10)\n",
        "    \n",
        "    with tf.name_scope('dropout'):\n",
        "        keep_prob = tf.placeholder(tf.float32)\n",
        "        tf.summary.scalar('dropout_keep_probability', keep_prob)\n",
        "    \n",
        "    # True while training; False at eval time, when weights below the\n",
        "    # sparsity threshold are zeroed inside nn_layer / nn_conv_layer.\n",
        "    with tf.name_scope('training_status'):\n",
        "        training_status = tf.placeholder(tf.bool)\n",
        "    \n",
        "    # We can't initialize these variables to 0 - the network will get stuck.\n",
        "    def weight_variable(shape):\n",
        "        \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n",
        "        initial = tf.truncated_normal(shape, stddev=0.1)\n",
        "        return tf.Variable(initial)\n",
        "\n",
        "    def bias_variable(shape):\n",
        "        \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n",
        "        # Small positive bias helps keep ReLUs active early in training.\n",
        "        initial = tf.constant(0.1, shape=shape)\n",
        "        return tf.Variable(initial)\n",
        "\n",
        "    def variable_summaries(var):\n",
        "        \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n",
        "        with tf.name_scope('summaries'):\n",
        "            mean = tf.reduce_mean(var)\n",
        "            tf.summary.scalar('mean', mean)\n",
        "            with tf.name_scope('stddev'):\n",
        "                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n",
        "            tf.summary.scalar('stddev', stddev)\n",
        "            tf.summary.scalar('max', tf.reduce_max(var))\n",
        "            tf.summary.scalar('min', tf.reduce_min(var))\n",
        "\n",
        "    def conv2d(x, W, padding='SAME'):\n",
        "        \"\"\"conv2d returns a 2d convolution layer with stride 1.\"\"\"\n",
        "        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)\n",
        "\n",
        "    def max_pool_2x2(x):\n",
        "        \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n",
        "        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n",
        "                              strides=[1, 2, 2, 1], padding='SAME')\n",
        "\n",
        "\n",
        "    def nn_layer(input_tensor, input_dim, output_dim, layer_name, training_status=True,\\\n",
        "                 act=tf.nn.relu):\n",
        "        \"\"\"Reusable code for making a simple neural net layer.\n",
        "\n",
        "        It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.\n",
        "        It also sets up name scoping so that the resultant graph is easy to read,\n",
        "        and adds a number of summary ops.\n",
        "\n",
        "        When `training_status` is False (evaluation), weights whose magnitude\n",
        "        is below FLAGS.sparsity_threshold are forced to zero via sparse_fn.\n",
        "        \"\"\"\n",
        "        # Adding a name scope ensures logical grouping of the layers in the graph.\n",
        "        with tf.name_scope(layer_name):\n",
        "            # This Variable will hold the state of the weights for the layer\n",
        "            with tf.name_scope('weights'):\n",
        "                weights = weight_variable([input_dim, output_dim])\n",
        "\n",
        "                # Get the general summaries (taken on the raw variable,\n",
        "                # before any sparsification below)\n",
        "                variable_summaries(weights)\n",
        "            with tf.name_scope('biases'):\n",
        "                biases = bias_variable([output_dim])\n",
        "                variable_summaries(biases)\n",
        "            with tf.name_scope('Wx_plus_b'):\n",
        "\n",
        "                # At evaluation time, some weights are forced to be zero with the sparsity criterion.\n",
        "                # (threshold=0.0 during training leaves the weights unchanged.)\n",
        "                weights = tf.cond(training_status,\n",
        "                   true_fn = lambda: sparse_fn(weights,threshold=0.0),\n",
        "                   false_fn = lambda: sparse_fn(weights,threshold=FLAGS.sparsity_threshold))\n",
        "\n",
        "                preactivate = tf.matmul(input_tensor, weights) + biases\n",
        "                # tf.summary.histogram('pre_activations', preactivate)\n",
        "\n",
        "            # Activation summary\n",
        "            activations = act(preactivate, name='activation')\n",
        "            tf.summary.scalar('sparsity', tf.nn.zero_fraction(activations))\n",
        "            tf.summary.histogram('activations', activations)\n",
        "\n",
        "\n",
        "            # Overall neurons: L1 mass of each input unit's outgoing weights\n",
        "            # (axis=1 sums across the output dimension of the matrix).\n",
        "            neurons = tf.reduce_sum(tf.abs(weights), axis=1)\n",
        "            tf.summary.histogram('neurons', neurons)\n",
        "\n",
        "\n",
        "            return activations\n",
        "\n",
        "    def nn_conv_layer(input_tensor, w_shape, b_shape, layer_name, padding='SAME', \\\n",
        "                      training_status=True, act=tf.nn.relu):\n",
        "        \"\"\"Reusable code for making a convolutional layer.\n",
        "\n",
        "        It does a 2-D convolution, bias add, and then uses ReLU to nonlinearize.\n",
        "        It also sets up name scoping so that the resultant graph is easy to read,\n",
        "        and adds a number of summary ops. When `training_status` is False,\n",
        "        weights below FLAGS.sparsity_threshold are zeroed via sparse_fn.\n",
        "        \"\"\"\n",
        "        # Adding a name scope ensures logical grouping of the layers in the graph.\n",
        "        with tf.name_scope(layer_name):\n",
        "            # This Variable will hold the state of the weights for the layer\n",
        "            with tf.name_scope('weights'):\n",
        "                weights = weight_variable(w_shape)\n",
        "                variable_summaries(weights)\n",
        "            with tf.name_scope('biases'):\n",
        "                biases = bias_variable(b_shape)\n",
        "                variable_summaries(biases)\n",
        "            with tf.name_scope('Wx_plus_b'):\n",
        "\n",
        "                # At evaluation time, some weights are forced to be zero with the sparsity criterion.\n",
        "                weights = tf.cond(training_status,\n",
        "                   true_fn = lambda: sparse_fn(weights,threshold=0.0),\n",
        "                   false_fn = lambda: sparse_fn(weights,threshold=FLAGS.sparsity_threshold))\n",
        "\n",
        "                preactivate = conv2d(input_tensor, weights,padding) + biases\n",
        "                # tf.summary.histogram('pre_activations', preactivate)\n",
        "\n",
        "            activations = act(preactivate, name='activation')\n",
        "            # tf.summary.histogram('activations', activations)\n",
        "            return activations\n",
        "    \n",
        "    def net(x,training_status):\n",
        "        \"\"\"Assemble the CNN: conv1 -> pool1 -> conv2 -> pool2 -> fc1 -> fc2 -> logits.\"\"\"\n",
        "\n",
        "        with tf.name_scope('reshape'):\n",
        "            x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
        "\n",
        "        h_conv1 = nn_conv_layer(x_image, [5, 5, 1, 64], [64], 'conv1', \\\n",
        "                                training_status=training_status, act=tf.nn.relu)\n",
        "\n",
        "        with tf.name_scope('pool1'):\n",
        "            h_pool1 = max_pool_2x2(h_conv1)\n",
        "\n",
        "        h_conv2 = nn_conv_layer(h_pool1, [5, 5, 64, 128], [128], 'conv2',\\\n",
        "                                training_status=training_status, act=tf.nn.relu)\n",
        "\n",
        "        # Second pooling layer.\n",
        "        with tf.name_scope('pool2'):\n",
        "            h_pool2 = max_pool_2x2(h_conv2)\n",
        "\n",
        "        # After two 2x2 poolings: 28x28 -> 14x14 -> 7x7, with 128 channels.\n",
        "        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 128])\n",
        "\n",
        "        h_fc1 = nn_layer(h_pool2_flat, 7 * 7 * 128, 512, 'fc1', \\\n",
        "                         training_status=training_status, act=tf.nn.relu)\n",
        "        dropped_h_fc1 = tf.nn.dropout(h_fc1, keep_prob)\n",
        "\n",
        "        h_fc2 = nn_layer(dropped_h_fc1, 512, 256, 'fc2', \\\n",
        "                         training_status=training_status, act=tf.nn.relu)\n",
        "        dropped_h_fc2 = tf.nn.dropout(h_fc2, keep_prob)\n",
        "\n",
        "        # Do not apply softmax activation yet, see below.\n",
        "        output = nn_layer(dropped_h_fc2, 256, 10, 'softmax', \\\n",
        "                          training_status=training_status, act=tf.identity)\n",
        "\n",
        "        return output, keep_prob\n",
        "\n",
        "    # Network\n",
        "    output, keep_prob = net(x,training_status)\n",
        "\n",
        "    with tf.name_scope('cross_entropy'):\n",
        "        with tf.name_scope('total'):\n",
        "            # Sparse variant: labels are class indices, not one-hot vectors.\n",
        "            cross_entropy = tf.losses.sparse_softmax_cross_entropy(\n",
        "                labels=y_, logits=output)\n",
        "    tf.summary.scalar('cross_entropy', cross_entropy)\n",
        "\n",
        "    #############################\n",
        "    ########## LOSS #############\n",
        "    #############################\n",
        "\n",
        "    # All trainable variables; biases are filtered inside the loss helpers.\n",
        "    trainable_variables = tf.trainable_variables()\n",
        "\n",
        "    # Compute the regularization term\n",
        "    with tf.name_scope('group_lasso'):\n",
        "        lasso_loss = 0.001 * group_lasso(trainable_variables)\n",
        "\n",
        "    with tf.name_scope('group_variance'):\n",
        "        variance_loss = 0.01 * group_variance(trainable_variables)\n",
        "\n",
        "    # NOTE(review): lasso_loss_invert is built but never added to the loss\n",
        "    # collection below, so it does not influence training.\n",
        "    with tf.name_scope('group_lasso_invert'):\n",
        "        lasso_loss_invert = 10.0 * tf.divide(1,group_lasso(trainable_variables))\n",
        "\n",
        "    tf.losses.add_loss(\n",
        "        lasso_loss,\n",
        "        loss_collection=tf.GraphKeys.LOSSES\n",
        "    )\n",
        "\n",
        "    tf.losses.add_loss(\n",
        "        variance_loss,\n",
        "        loss_collection=tf.GraphKeys.LOSSES\n",
        "    )\n",
        "\n",
        "    # Measure current network sparsity (for reporting only; not a loss term).\n",
        "    with tf.name_scope('Sparsity'):\n",
        "        sparsity, num_params = sparsity_calculatior(trainable_variables)\n",
        "    \n",
        "    tf.summary.scalar('sparsity', sparsity)\n",
        "\n",
        "    ###############################################\n",
        "    ############### Total Loss  ###################\n",
        "    ###############################################\n",
        "\n",
        "    # Cross-entropy + the two losses added above (+ any regularization losses).\n",
        "    total_loss = tf.losses.get_total_loss(add_regularization_losses=True, name='total_loss')\n",
        "    # NOTE(review): list_losses / reg_losses are fetched but unused below.\n",
        "    list_losses = tf.losses.get_losses(loss_collection=tf.GraphKeys.LOSSES)\n",
        "    reg_losses = tf.losses.get_losses(loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)\n",
        "\n",
        "\n",
        "    with tf.name_scope('train'):\n",
        "        train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(\n",
        "            total_loss)\n",
        "\n",
        "    with tf.name_scope('accuracy'):\n",
        "        with tf.name_scope('correct_prediction'):\n",
        "            correct_prediction = tf.equal(tf.argmax(output, 1), y_)\n",
        "        with tf.name_scope('accuracy'):\n",
        "            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "    tf.summary.scalar('accuracy', accuracy)\n",
        "\n",
        "    # Merge all the summaries and write them out to\n",
        "    # /tmp/tensorflow/mnist/logs/mnist_sparsity (by default)\n",
        "    merged = tf.summary.merge_all()\n",
        "    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)\n",
        "    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\n",
        "    tf.global_variables_initializer().run()\n",
        "\n",
        "    # Train the model, and also write summaries.\n",
        "    def feed_dict(train):\n",
        "        \"\"\"Make a TensorFlow feed_dict: maps data onto Tensor placeholders.\"\"\"\n",
        "        if train or FLAGS.fake_data:\n",
        "            is_train = True\n",
        "            xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)\n",
        "            k = FLAGS.dropout\n",
        "        else:\n",
        "            is_train = False\n",
        "            xs, ys = mnist.test.next_batch(1000)\n",
        "            k = 1.0\n",
        "        return {x: xs, y_: ys, keep_prob: k, training_status:is_train}\n",
        "\n",
        "    for i in range(1, FLAGS.max_steps):\n",
        "\n",
        "        if i % 100 == 0:  # Record summaries and test-set accuracy\n",
        "            # Evaluation step: train_step is NOT run here, so every 100th\n",
        "            # step only measures accuracy/sparsity on a test batch.\n",
        "            summary, acc,sparsity_value, num_parameters = sess.run([merged, \\\n",
        "                       accuracy,sparsity, num_params], feed_dict=feed_dict(False))\n",
        "            test_writer.add_summary(summary, i)\n",
        "            print('Accuracy and Sparsity at step %s: %s , %s\\n number of parameters= %s' % (i, \\\n",
        "                                                           acc, sparsity_value,num_parameters))\n",
        "\n",
        "\n",
        "        else:  # Record a summary\n",
        "            summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))\n",
        "            train_writer.add_summary(summary, i)\n",
        "            \n",
        "    train_writer.close()\n",
        "    test_writer.close()\n",
        "\n",
        "\n",
        "  \n",
        "    "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "WXp3FUPxAoOX",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### *Main function*\n",
        "\n",
        "---\n",
        "\n"
      ]
    },
    {
      "metadata": {
        "id": "AWNVWvpXAiZ_",
        "colab_type": "code",
        "colab": {
          "autoexec": {
            "startup": false,
            "wait_interval": 0
          }
        }
      },
      "cell_type": "code",
      "source": [
        "def main(_):\n",
        "    \"\"\"Reset the log directory and launch training.\"\"\"\n",
        "    if tf.gfile.Exists(FLAGS.log_dir):\n",
        "        tf.gfile.DeleteRecursively(FLAGS.log_dir)\n",
        "    tf.gfile.MakeDirs(FLAGS.log_dir)\n",
        "    train()\n",
        "\n",
        "if __name__ == '__main__':\n",
        "    parser = argparse.ArgumentParser()\n",
        "    parser.add_argument('--fake_data', nargs='?', const=True, type=bool,\n",
        "                        default=False,\n",
        "                        help='If true, uses fake data for unit testing.')\n",
        "    parser.add_argument('--max_steps', type=int, default=300000,\n",
        "                        help='Number of steps to run trainer.')\n",
        "    parser.add_argument('--learning_rate', type=float, default=0.0001,\n",
        "                        help='Initial learning rate')\n",
        "    # BUGFIX: help text was a copy-paste of --learning_rate's help.\n",
        "    parser.add_argument('--sparsity_threshold', type=float, default=0.001,\n",
        "                        help='Magnitude below which weights are treated as zero.')\n",
        "    parser.add_argument('--dropout', type=float, default=0.8,\n",
        "                        help='Keep probability for training dropout.')\n",
        "    parser.add_argument(\n",
        "        '--data_dir',\n",
        "        type=str,\n",
        "        default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n",
        "                             'tensorflow/mnist/input_data'),\n",
        "        help='Directory for storing input data')\n",
        "    parser.add_argument(\n",
        "        '--log_dir',\n",
        "        type=str,\n",
        "        default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n",
        "                             'tensorflow/mnist/logs/mnist_sparsity'),\n",
        "        help='Summaries log directory')\n",
        "    FLAGS, unparsed = parser.parse_known_args()\n",
        "    # tf.app.run parses remaining flags and calls main(argv).\n",
        "    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}