{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "31.1 Self Driving Car OG",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/sharathsrini/Behavioral-Clonning/blob/master/31_1_Self_Driving_Car_OG.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "metadata": {
        "id": "rMnJH2iZLz_F",
        "colab_type": "code",
        "outputId": "65cf662f-3f28-4b35-f759-7917e6812922",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1698
        }
      },
      "cell_type": "code",
      "source": [
        "!wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n",
        "!tar -xvzf cifar-10-python.tar.gz\n",
        "\n",
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "import os\n",
        "import matplotlib.pyplot as plt\n",
        "from sklearn.utils import shuffle\n",
        "from sklearn.preprocessing import OneHotEncoder\n",
        "\n",
        "# Let TF grow GPU memory on demand instead of grabbing it all up front.\n",
        "config = tf.ConfigProto()\n",
        "config.gpu_options.allow_growth=True\n",
        "\n",
        "# Seed numpy and tensorflow for reproducible runs.\n",
        "np.random.seed(68)\n",
        "tf.set_random_seed(5678)\n",
        "\n",
        "# Activation helpers used by the layer classes below.\n",
        "def tf_log(x): return tf.sigmoid(x)\n",
        "def tf_Relu(x): return tf.nn.relu(x)\n",
        "def tf_softmax(x): return tf.nn.softmax(x)\n",
        "\n",
        "# Load one pickled CIFAR-10 batch file into a dict keyed by bytes.\n",
        "def unpickle(file):\n",
        "    import pickle\n",
        "    with open(file, 'rb') as fo:\n",
        "        batch_dict = pickle.load(fo, encoding='bytes')\n",
        "    return batch_dict\n",
        "\n",
        "data_path = \"./cifar-10-batches-py/\"\n",
        "# Collect the CIFAR batch files, skipping the readme and metadata files.\n",
        "data_files = []\n",
        "for dir_name, subdir_list, file_list in os.walk(data_path):\n",
        "    for filename in file_list:\n",
        "        if \".html\" not in filename.lower() and \".meta\" not in filename.lower():\n",
        "            data_files.append(os.path.join(dir_name, filename))\n",
        "\n",
        "# BUG FIX: os.walk yields files in arbitrary order, so positional indexing\n",
        "# (batch0 = files[0], ..., test = files[5]) could place test_batch inside the\n",
        "# training set and use a training batch as the 'test' set -- the tar listing\n",
        "# in this cell's recorded output shows exactly that ordering. Select the five\n",
        "# data_batch_* files and the test_batch file explicitly by name instead.\n",
        "train_files = sorted(f for f in data_files if 'data_batch' in os.path.basename(f))\n",
        "test_file = next(f for f in data_files if 'test_batch' in os.path.basename(f))\n",
        "train_dicts = [unpickle(f) for f in train_files]\n",
        "test_dict = unpickle(test_file)\n",
        "\n",
        "onehot_encoder = OneHotEncoder(sparse=True)\n",
        "\n",
        "# Stack the five training batches; one-hot encode the integer labels.\n",
        "train_batch = np.vstack([d[b'data'] for d in train_dicts])\n",
        "train_label = np.expand_dims(np.hstack([d[b'labels'] for d in train_dicts]).T,axis=1).astype(np.float32)\n",
        "train_label = onehot_encoder.fit_transform(train_label).toarray().astype(np.float32)\n",
        "\n",
        "test_batch = test_dict[b'data']\n",
        "test_label = np.expand_dims(np.array(test_dict[b'labels']),axis=0).T.astype(np.float32)\n",
        "test_label = onehot_encoder.fit_transform(test_label).toarray().astype(np.float32)\n",
        "\n",
        "# Normalize each pixel column to the [0, 1] range.\n",
        "train_batch = (train_batch - train_batch.min(axis=0))/(train_batch.max(axis=0)-train_batch.min(axis=0))\n",
        "test_batch = (test_batch - test_batch.min(axis=0))/(test_batch.max(axis=0)-test_batch.min(axis=0))\n",
        "\n",
        "# Reshape the flat 3072-value rows into (N, 3, 32, 32).\n",
        "train_batch = np.reshape(train_batch,(len(train_batch),3,32,32))\n",
        "test_batch = np.reshape(test_batch,(len(test_batch),3,32,32))\n",
        "\n",
        "# Rotate axes so channels end up last (NHWC layout, as tf.nn.conv2d expects).\n",
        "train_batch = np.rot90(np.rot90(train_batch,1,axes=(1,3)),3,axes=(1,2)).astype(np.float32)\n",
        "test_batch = np.rot90(np.rot90(test_batch,1,axes=(1,3)),3,axes=(1,2)).astype(np.float32)\n",
        "\n",
        "# cnn\n",
        "# Convolutional layer: VALID-padded conv2d followed by ReLU.\n",
        "# NOTE(review): self.m / self.v look like Adam moment slots carried over from\n",
        "# a manual-backprop version; they are never read when training with\n",
        "# tf.train.AdamOptimizer below.\n",
        "class CNNLayer():\n",
        "    \n",
        "    def __init__(self,kernel,inchan,outchan):\n",
        "        # kernel: square spatial filter size; inchan/outchan: channel counts.\n",
        "        self.w = tf.Variable(tf.random_normal([kernel,kernel,inchan,outchan]))\n",
        "        self.m,self.v = tf.Variable(tf.zeros_like(self.w)), tf.Variable(tf.zeros_like(self.w))\n",
        "    def getw(self): return self.w\n",
        "    def feedforward(self,input,stride_num=1):\n",
        "        # VALID padding: each conv shrinks the spatial size; stride applies to\n",
        "        # both spatial axes.\n",
        "        self.input = input\n",
        "        self.layer = tf.nn.conv2d(input,self.w,strides=[1,stride_num,stride_num,1],padding=\"VALID\")\n",
        "        self.layerA = tf_Relu(self.layer)\n",
        "        return self.layerA\n",
        "\n",
        "# fcc\n",
        "# Fully connected layer: matmul followed by a sigmoid activation (tf_log).\n",
        "class FCCLayer():\n",
        "    \n",
        "    def __init__(self,input,output):\n",
        "        # input/output: feature dimensions of the weight matrix.\n",
        "        self.w = tf.Variable(tf.random_normal([input,output]))\n",
        "        self.m,self.v = tf.Variable(tf.zeros_like(self.w)), tf.Variable(tf.zeros_like(self.w))\n",
        "    def getw(self): return self.w\n",
        "    def feedforward(self,input):\n",
        "        self.input = input\n",
        "        self.layer = tf.matmul(input,self.w)\n",
        "        self.layerA = tf_log(self.layer)\n",
        "        return self.layerA\n",
        "        \n",
        "        \n",
        "# create layers: five conv layers then four fully-connected layers\n",
        "l1 = CNNLayer(5,3,24)\n",
        "l2 = CNNLayer(5,24,36)\n",
        "l3 = CNNLayer(5,36,48)\n",
        "l4 = CNNLayer(3,48,64)\n",
        "l5 = CNNLayer(3,64,64)\n",
        "\n",
        "# Spatial sizes with VALID padding: 32 -> conv5/s2 -> 14 -> conv5 -> 10\n",
        "# -> conv5 -> 6 -> conv3 -> 4 -> conv3 -> 2, so the flattened feature\n",
        "# vector is 2*2*64 = 256 values.\n",
        "l6 = FCCLayer(256,1164)\n",
        "l7 = FCCLayer(1164,100)\n",
        "l8 = FCCLayer(100,50)\n",
        "l9 = FCCLayer(50,10)\n",
        "\n",
        "# All trainable weights, passed to the optimizer via var_list.\n",
        "weights = [l1.getw(),l2.getw(),\n",
        "           l3.getw(),l4.getw(),\n",
        "           l5.getw(),l6.getw(),\n",
        "           l7.getw(),l8.getw(),\n",
        "           l9.getw()]\n",
        "\n",
        "# Hyper parameters (unused leftovers beta1/beta2/adam_e and the decay\n",
        "# constants were removed; AdamOptimizer uses its own defaults).\n",
        "learning_rate = 0.001\n",
        "batch_size = 200\n",
        "num_epoch = 800\n",
        "print_size = 50\n",
        "\n",
        "# Create graph\n",
        "x = tf.placeholder(shape=[None,32,32,3],dtype=tf.float32)\n",
        "y = tf.placeholder(shape=[None,10],dtype=tf.float32)\n",
        "\n",
        "layer1 = l1.feedforward(x,stride_num=2)\n",
        "layer2 = l2.feedforward(layer1)\n",
        "layer3 = l3.feedforward(layer2)\n",
        "layer4 = l4.feedforward(layer3)\n",
        "layer5 = l5.feedforward(layer4)\n",
        "\n",
        "# Flatten with -1 instead of the hard-coded batch_size so the graph also\n",
        "# accepts a final short batch or single-image inference.\n",
        "layer6_Input = tf.reshape(layer5,[-1,256])\n",
        "layer6 = l6.feedforward(layer6_Input)\n",
        "layer7 = l7.feedforward(layer6)\n",
        "layer8 = l8.feedforward(layer7)\n",
        "layer9 = l9.feedforward(layer8)\n",
        "\n",
        "final_soft = tf_softmax(layer9)\n",
        "\n",
        "# Clip probabilities away from exact 0/1 before taking logs: tf.log(0)\n",
        "# yields -inf and poisons the loss and gradients with NaNs.\n",
        "clipped_soft = tf.clip_by_value(final_soft,1e-10,1.0-1e-10)\n",
        "cost = tf.reduce_sum(-1.0 * (y* tf.log(clipped_soft) + (1-y)*tf.log(1-clipped_soft)))\n",
        "correct_prediction = tf.equal(tf.argmax(final_soft, 1), tf.argmax(y, 1))\n",
        "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "\n",
        "# auto train: Adam with default moments/epsilon, updating only `weights`\n",
        "auto_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,var_list=weights)\n",
        "\n",
        "# create Session: run the full train/eval loop, recording per-epoch averages\n",
        "with tf.Session(config=config) as sess: \n",
        "\n",
        "    sess.run(tf.global_variables_initializer())\n",
        "\n",
        "    # Running totals for the current epoch; *_overtime lists hold one\n",
        "    # averaged value per epoch for the plots below.\n",
        "    train_total_cost,train_total_acc =0,0\n",
        "    train_cost_overtime,train_acc_overtime = [],[]\n",
        "\n",
        "    test_total_cost,test_total_acc = 0,0\n",
        "    test_cost_overtime,test_acc_overtime = [],[]\n",
        "\n",
        "    # NOTE(review): `iter` shadows the Python builtin; harmless here but\n",
        "    # worth renaming if this loop is ever extended.\n",
        "    for iter in range(num_epoch):\n",
        "        \n",
        "        # Reshuffle the training set each epoch (images and labels together).\n",
        "        train_batch,train_label = shuffle(train_batch,train_label)\n",
        "\n",
        "        # Train Batch: one optimizer step per mini-batch.\n",
        "        for current_batch_index in range(0,len(train_batch),batch_size):\n",
        "\n",
        "            current_batch = train_batch[current_batch_index:current_batch_index+batch_size,:,:,:]\n",
        "            current_batch_label = train_label[current_batch_index:current_batch_index+batch_size,:]\n",
        "\n",
        "            sess_results = sess.run( [cost,accuracy,correct_prediction,auto_train], feed_dict= {x:current_batch,y:current_batch_label})\n",
        "            # end='\\r' keeps progress on a single line.\n",
        "            print(\"current iter:\", iter, \" current cost: \", sess_results[0],' current acc: ',sess_results[1], end='\\r')\n",
        "            train_total_cost = train_total_cost + sess_results[0]\n",
        "            train_total_acc = train_total_acc + sess_results[1]\n",
        "\n",
        "        # Test batch: evaluation only (no auto_train in the fetch list).\n",
        "        for current_batch_index in range(0,len(test_batch),batch_size):\n",
        "\n",
        "            current_batch = test_batch[current_batch_index:current_batch_index+batch_size,:,:,:]\n",
        "            current_batch_label = test_label[current_batch_index:current_batch_index+batch_size,:]\n",
        "\n",
        "            sess_results = sess.run( [cost,accuracy,correct_prediction], feed_dict= {x:current_batch,y:current_batch_label})\n",
        "            print(\"current iter:\", iter, \" current cost: \", sess_results[0],' current acc: ',sess_results[1], end='\\r')\n",
        "            test_total_cost = test_total_cost + sess_results[0]\n",
        "            test_total_acc = test_total_acc + sess_results[1]\n",
        "\n",
        "        # store: average over the number of mini-batches in the epoch.\n",
        "        train_cost_overtime.append(train_total_cost/(len(train_batch)/batch_size ) )\n",
        "        train_acc_overtime.append(train_total_acc/(len(train_batch)/batch_size ) )\n",
        "\n",
        "        test_cost_overtime.append(test_total_cost/(len(test_batch)/batch_size ) )\n",
        "        test_acc_overtime.append(test_total_acc/(len(test_batch)/batch_size ) )\n",
        "        \n",
        "        # print a summary every print_size epochs.\n",
        "        if iter%print_size == 0:\n",
        "            print('\\n=========')\n",
        "            print(\"Avg Train Cost: \", train_cost_overtime[-1])\n",
        "            print(\"Avg Train Acc: \", train_acc_overtime[-1])\n",
        "            print(\"Avg Test Cost: \", test_cost_overtime[-1])\n",
        "            print(\"Avg Test Acc: \", test_acc_overtime[-1])\n",
        "            print('-----------')      \n",
        "        # Reset the running totals for the next epoch.\n",
        "        train_total_cost,train_total_acc,test_total_cost,test_total_acc=0,0,0,0            \n",
        "                \n",
        "        \n",
        "\n",
        "\n",
        "# plot and save: one figure per recorded metric series.\n",
        "metric_series = [\n",
        "    (train_cost_overtime, 'og Train Cost over time'),\n",
        "    (train_acc_overtime, 'og Train Acc over time'),\n",
        "    (test_cost_overtime, 'og Test Cost over time'),\n",
        "    (test_acc_overtime, 'og Test Acc over time'),\n",
        "]\n",
        "for series, fname in metric_series:\n",
        "    plt.figure()\n",
        "    plt.plot(range(len(series)), series, color='y', label='Original Model')\n",
        "    plt.legend()\n",
        "    plt.savefig(fname)\n",
        "\n",
        "\n",
        "# --- end code --"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "--2018-03-20 12:42:40--  https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\n",
            "Resolving www.cs.toronto.edu (www.cs.toronto.edu)... 128.100.3.30\n",
            "Connecting to www.cs.toronto.edu (www.cs.toronto.edu)|128.100.3.30|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 170498071 (163M) [application/x-gzip]\n",
            "Saving to: ‘cifar-10-python.tar.gz’\n",
            "\n",
            "cifar-10-python.tar 100%[===================>] 162.60M  1.87MB/s    in 24s     \n",
            "\n",
            "2018-03-20 12:43:05 (6.64 MB/s) - ‘cifar-10-python.tar.gz’ saved [170498071/170498071]\n",
            "\n",
            "cifar-10-batches-py/\n",
            "cifar-10-batches-py/data_batch_4\n",
            "cifar-10-batches-py/readme.html\n",
            "cifar-10-batches-py/test_batch\n",
            "cifar-10-batches-py/data_batch_3\n",
            "cifar-10-batches-py/batches.meta\n",
            "cifar-10-batches-py/data_batch_2\n",
            "cifar-10-batches-py/data_batch_5\n",
            "cifar-10-batches-py/data_batch_1\n",
            "current iter: 0  current cost:  648.72205  current acc:  0.13\n",
            "=========\n",
            "Avg Train Cost:  652.5157741699219\n",
            "Avg Train Acc:  0.11894000004231929\n",
            "Avg Test Cost:  649.8954772949219\n",
            "Avg Test Acc:  0.11709999985992908\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 50  current cost:  630.4605  current acc:  0.135\n",
            "=========\n",
            "Avg Train Cost:  628.6582165527344\n",
            "Avg Train Acc:  0.16210000005364417\n",
            "Avg Test Cost:  626.2163586425781\n",
            "Avg Test Acc:  0.1641000020503998\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 100  current cost:  629.9233  current acc:  0.155\n",
            "=========\n",
            "Avg Train Cost:  626.4286984863281\n",
            "Avg Train Acc:  0.1738000003695488\n",
            "Avg Test Cost:  626.1192553710938\n",
            "Avg Test Acc:  0.1769999998807907\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 150  current cost:  625.0927  current acc:  0.165\n",
            "=========\n",
            "Avg Train Cost:  621.65119921875\n",
            "Avg Train Acc:  0.19617999961972238\n",
            "Avg Test Cost:  622.1602697753906\n",
            "Avg Test Acc:  0.1987999999523163\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 200  current cost:  624.2868  current acc:  0.195\n",
            "=========\n",
            "Avg Train Cost:  620.063732421875\n",
            "Avg Train Acc:  0.20159999930858613\n",
            "Avg Test Cost:  620.2621997070313\n",
            "Avg Test Acc:  0.20399999916553496\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 250  current cost:  623.18085  current acc:  0.175\n",
            "=========\n",
            "Avg Train Cost:  618.8992844238281\n",
            "Avg Train Acc:  0.20585999953746795\n",
            "Avg Test Cost:  619.7248840332031\n",
            "Avg Test Acc:  0.20339999973773956\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 300  current cost:  622.3759  current acc:  0.22\n",
            "=========\n",
            "Avg Train Cost:  618.3032998046875\n",
            "Avg Train Acc:  0.20643999940156937\n",
            "Avg Test Cost:  620.0992309570313\n",
            "Avg Test Acc:  0.2062999990582466\n",
            "-----------\n",
            "current iter: "
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 350  current cost:  624.2408  current acc:  0.22\n",
            "=========\n",
            "Avg Train Cost:  617.7118842773438\n",
            "Avg Train Acc:  0.20967999869585038\n",
            "Avg Test Cost:  619.10751953125\n",
            "Avg Test Acc:  0.21039999902248382\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 400  current cost:  622.7089  current acc:  0.17\n",
            "=========\n",
            "Avg Train Cost:  617.3322036132812\n",
            "Avg Train Acc:  0.20865999934077262\n",
            "Avg Test Cost:  619.6877685546875\n",
            "Avg Test Acc:  0.20439999908208847\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 450  current cost:  624.16846  current acc:  0.195\n",
            "=========\n",
            "Avg Train Cost:  616.9997983398438\n",
            "Avg Train Acc:  0.2102999994158745\n",
            "Avg Test Cost:  620.2354943847656\n",
            "Avg Test Acc:  0.20640000015497206\n",
            "-----------\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "current iter: 500  current cost:  623.77637  current acc:  0.22\n",
            "=========\n",
            "Avg Train Cost:  616.5402854003906\n",
            "Avg Train Acc:  0.21085999935865402\n",
            "Avg Test Cost:  619.8216235351563\n",
            "Avg Test Acc:  0.20669999986886978\n",
            "-----------\n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}