{
 "metadata": {
  "name": "",
  "signature": "sha256:e674afd5acd25e3059053467d2bb72b4ce6a8d3dfcd9b39924bf576a9a652cd3"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# extract the variables that would be used for KITTI-finetuning\n",
      "\n",
      "import tensorflow as tf\n",
      "import numpy as np\n",
      "import os\n",
      "import scipy.io\n",
      "import glob\n",
      "import random\n",
      "import BatchDatsetReader as dataset\n",
      "import scipy.misc as misc\n",
      "from six.moves import cPickle as pickle\n",
      "\n",
      "# reset the graph\n",
      "tf.reset_default_graph()\n",
      "\n",
      "# reset tf.flags.FLAGS\n",
      "import argparse\n",
      "tf.reset_default_graph()\n",
      "tf.flags.FLAGS = tf.python.platform.flags._FlagValues()\n",
      "tf.flags._global_parser = argparse.ArgumentParser()\n",
      "\n",
      "# set tf.flags.FLAGS\n",
      "FLAGS = tf.flags.FLAGS\n",
      "tf.flags.DEFINE_integer(\"batch_size\",\"2\",\"batch size for training\")\n",
      "tf.flags.DEFINE_string(\"logs_dir\",\"logs/\",\"path to logs directory\")\n",
      "tf.flags.DEFINE_string(\"data_dir\",\"Data_zoo/MIT_SceneParsing/\",\"path to dataset\")\n",
      "tf.flags.DEFINE_string(\"pickle_name\",\"MITSceneParsing.pickle\",\"pickle file of the data\")\n",
      "tf.flags.DEFINE_string(\"data_url\",\"http://sceneparsing.csail.mit.edu/data/ADEChallengeData2016.zip\",\"url of the data\")\n",
      "tf.flags.DEFINE_float(\"learning_rate\",\"1e-4\",\"learning rate for the optimizier\")\n",
      "tf.flags.DEFINE_string(\"model_dir\",\"Model_zoo/\",\"path to vgg model mat\")\n",
      "tf.flags.DEFINE_bool(\"debug\",\"True\",\"Debug model: True/False\")\n",
      "tf.flags.DEFINE_string(\"mode\",\"train\",\"Mode: train/ valid\")\n",
      "tf.flags.DEFINE_integer(\"max_iters\",\"100001\",\"max training iterations of batches\")\n",
      "tf.flags.DEFINE_integer(\"num_classes\",\"151\",\"mit_sceneparsing with (150+1) classes\")\n",
      "tf.flags.DEFINE_integer(\"image_size\",\"224\",\"can be variable in deed\")\n",
      "tf.flags.DEFINE_string(\"model_weights\",\"http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat\",\"pretrained weights of the CNN in use\")\n",
      "tf.flags.DEFINE_string(\"full_model\",\"full_model/\",\"trained parameters of the whole network\")\n",
      "tf.flags.DEFINE_string(\"full_model_file\",\"100000_model.ckpt\",\"pretrained parameters of the whole network\")\n",
      "tf.flags.DEFINE_bool(\"load\",\"True\",\"load in pretrained parameters\")\n",
      "\n",
      "# check if the CNN weights folder exist\n",
      "if not os.path.exists(FLAGS.model_dir):\n",
      "    os.makedirs(FLAGS.model_dir)\n",
      "\n",
      "# check if the CNN weights file exist\n",
      "weights_file = os.path.join(FLAGS.model_dir,FLAGS.model_weights.split('/')[-1])\n",
      "if not os.path.exists(weights_file):\n",
      "    print(\"\\ndownloading \"+weights_file+\" ...\")\n",
      "    os.system(\"wget \"+FLAGS.model_weights+\" -P \"+FLAGS.model_dir)\n",
      "    print(\"download finished!\\n\")\n",
      "else:\n",
      "    print(\"\\n\"+weights_file+\" has already been downloaded.\\n\")\n",
      "\n",
      "# load the weights file\n",
      "print(\"\\nloading pretrained weights from: \"+weights_file)\n",
      "pretrain_weights = scipy.io.loadmat(weights_file)\n",
      "print(\"loading finished!\\n\")\n",
      "\n",
      "# the mean RGB\n",
      "mean = pretrain_weights['normalization'][0][0][0] # shape(224,224,3)\n",
      "mean_pixel = np.mean(mean,axis=(0,1)) # average on (height,width) to compute the mean RGB   \n",
      "\n",
      "# the weights and biases\n",
      "weights_biases = np.squeeze(pretrain_weights['layers'])\n",
      "\n",
      "# network input data\n",
      "dropout_prob = tf.placeholder(tf.float32,name=\"dropout_probability\")\n",
      "images = tf.placeholder(tf.float32,shape=[None,FLAGS.image_size,FLAGS.image_size,3],name=\"input_images\")\n",
      "annotations = tf.placeholder(tf.int32,shape=[None,FLAGS.image_size,FLAGS.image_size,1],name=\"input_annotations\")\n",
      "\n",
      "# subtract the mean image\n",
      "processed_image = images - mean_pixel\n",
      "\n",
      "# construct the semantic_seg network\n",
      "with tf.variable_scope(\"semantic_seg\"):\n",
      "    # convs of the vgg net\n",
      "    net = {}\n",
      "    layers = [\n",
      "        'conv1_1','relu1_1','conv1_2','relu1_2','pool1',\n",
      "        'conv2_1','relu2_1','conv2_2','relu2_2','pool2',\n",
      "        'conv3_1','relu3_1','conv3_2','relu3_2','conv3_3','relu3_3','conv3_4','relu3_4','pool3',\n",
      "        'conv4_1','relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','conv4_4','relu4_4','pool4',\n",
      "        'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3' #,'relu5_3','conv5_4','relu5_4','pool5'\n",
      "    ]\n",
      "    current = processed_image\n",
      "\n",
      "    # sanity check\n",
      "    print(\"processed_image: {}\".format(processed_image.get_shape()))\n",
      "\n",
      "    # build the truncated VGG-19 stack (conv/relu/pool up to conv5_3),\n",
      "    # loading the pretrained matconvnet weights for every conv layer\n",
      "    for i,name in enumerate(layers):\n",
      "        layer_type = name[:4]  # 'conv' / 'relu' / 'pool' (avoid shadowing builtin type)\n",
      "        if layer_type == 'conv':\n",
      "            # matconvnet weights: (width, height, in_channels, out_channels)\n",
      "            # tensorflow weights: (height, width, in_channels, out_channels)\n",
      "            weights, biases = weights_biases[i][0][0][0][0]\n",
      "\n",
      "            weights = np.transpose(weights,(1,0,2,3)) \n",
      "            biases = np.squeeze(biases)\n",
      "            \n",
      "            init = tf.constant_initializer(weights,dtype=tf.float32)\n",
      "            weights = tf.get_variable(initializer=init,shape=weights.shape,name=name+\"_w\")\n",
      "            \n",
      "            init = tf.constant_initializer(biases,dtype=tf.float32)\n",
      "            biases = tf.get_variable(initializer=init,shape=biases.shape,name=name+\"_b\")\n",
      "            \n",
      "            current = tf.nn.conv2d(current,weights,strides=[1,1,1,1],padding=\"SAME\")\n",
      "            current = tf.nn.bias_add(current,biases,name=name)\n",
      "\n",
      "            # sanity check\n",
      "            print(\"{}: {}\".format(name,current.get_shape()))\n",
      "        elif layer_type == 'relu':\n",
      "            current = tf.nn.relu(current,name=name)\n",
      "            if FLAGS.debug:\n",
      "                tf.histogram_summary(current.op.name+\"/activation\",current)\n",
      "                tf.scalar_summary(current.op.name+\"/sparsity\",tf.nn.zero_fraction(current))\n",
      "            # sanity check\n",
      "            print(\"{}: {}\".format(name,current.get_shape()))        \n",
      "        elif layer_type == 'pool':\n",
      "            # NOTE: 'pool5' is not in `layers` above (it is commented out), so the\n",
      "            # max_pool branch is unreachable here; all in-loop pools are avg_pool\n",
      "            if name == 'pool5':\n",
      "                current = tf.nn.max_pool(current,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=name)\n",
      "            else:\n",
      "                current = tf.nn.avg_pool(current,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=name)\n",
      "            # sanity check\n",
      "            print(\"{}: {}\".format(name,current.get_shape()))\n",
      "        net[name] = current\n",
      "             \n",
      "    # pool5 is applied outside the loop; name the op explicitly instead of\n",
      "    # reusing the stale loop variable `name` (which still holds 'conv5_3' here)\n",
      "    net['pool5'] = tf.nn.max_pool(net['conv5_3'],ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=\"pool5\")\n",
      "    # sanity check\n",
      "    print(\"pool5: {}\".format(net['pool5'].get_shape()))\n",
      "    \n",
      "     # fcn6\n",
      "    init = tf.truncated_normal(shape=[7,7,512,4096],stddev=0.02)\n",
      "    fcn6_w = tf.get_variable(initializer=init,name=\"fcn6_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[4096])\n",
      "    fcn6_b = tf.get_variable(initializer=init,name=\"fcn6_b\")\n",
      "\n",
      "    fcn6 = tf.nn.conv2d(net['pool5'],fcn6_w,strides=[1,1,1,1],padding=\"SAME\")\n",
      "    fcn6 = tf.nn.bias_add(fcn6,fcn6_b,name=\"fcn6\")\n",
      "\n",
      "    relu6 = tf.nn.relu(fcn6,name=\"relu6\")\n",
      "    if FLAGS.debug:\n",
      "        tf.histogram_summary(\"relu6/activation\", relu6, collections=None, name=None)\n",
      "        tf.scalar_summary(\"relu6/sparsity\", tf.nn.zero_fraction(relu6), collections=None, name=None)\n",
      "    dropout6 = tf.nn.dropout(relu6, keep_prob=dropout_prob, noise_shape=None, seed=None, name=\"dropout6\")\n",
      "    # sanity check\n",
      "    print(\"dropout6: {}\".format(dropout6.get_shape()))\n",
      "\n",
      "     # fcn7\n",
      "    init = tf.truncated_normal(shape=[1,1,4096,4096],stddev=0.02)\n",
      "    fcn7_w = tf.get_variable(initializer=init,name=\"fcn7_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[4096])\n",
      "    fcn7_b = tf.get_variable(initializer=init,name=\"fcn7_b\")\n",
      "\n",
      "    fcn7 = tf.nn.conv2d(dropout6, fcn7_w, strides=[1,1,1,1], padding=\"SAME\", use_cudnn_on_gpu=None, data_format=None, name=None)\n",
      "    fcn7 = tf.nn.bias_add(fcn7, fcn7_b, data_format=None, name=\"fcn7\")\n",
      "\n",
      "    relu7 = tf.nn.relu(fcn7,name=\"relu7\")\n",
      "    if FLAGS.debug:\n",
      "        tf.histogram_summary(\"relu7/activation\", relu7, collections=None, name=None)\n",
      "        tf.scalar_summary(\"relu7/sparsity\", tf.nn.zero_fraction(relu7), collections=None, name=None)\n",
      "    dropout7 = tf.nn.dropout(relu7, keep_prob=dropout_prob, noise_shape=None, seed=None, name=\"dropout7\")\n",
      "    # sanity check\n",
      "    print(\"dropout7: {}\".format(dropout7.get_shape()))\n",
      "\n",
      "    # fcn8\n",
      "    init = tf.truncated_normal(shape=[1,1,4096,FLAGS.num_classes],stddev=0.02)\n",
      "    fcn8_w = tf.get_variable(initializer=init,name=\"fcn8_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[FLAGS.num_classes])\n",
      "    fcn8_b = tf.get_variable(initializer=init,name=\"fcn8_b\")\n",
      "\n",
      "    fcn8 = tf.nn.conv2d(dropout7, fcn8_w, strides=[1,1,1,1], padding=\"SAME\", use_cudnn_on_gpu=None, data_format=None, name=None)\n",
      "    fcn8 = tf.nn.bias_add(fcn8, fcn8_b, data_format=None, name=\"fcn8\")\n",
      "    # sanity check\n",
      "    print(\"fcn8: {}\".format(fcn8.get_shape()))\n",
      "\n",
      "    # deconv1 + net['pool4']: x32 -> x16\n",
      "    s = 2\n",
      "    k = 2*s\n",
      "    in_channel = FLAGS.num_classes\n",
      "    out_channel = net['pool4'].get_shape()[3].value\n",
      "    out_shape = tf.shape(net['pool4'])\n",
      "\n",
      "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
      "    deconv1_w = tf.get_variable(initializer=init,name=\"deconv1_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[out_channel])\n",
      "    deconv1_b = tf.get_variable(initializer=init,name=\"deconv1_b\")\n",
      "\n",
      "    # sanity check\n",
      "    print(\"deconv1 output_shape: {}\".format(net['pool4'].get_shape()))\n",
      "\n",
      "    deconv1 = tf.nn.conv2d_transpose(fcn8, deconv1_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
      "    deconv1 = tf.nn.bias_add(deconv1, deconv1_b, data_format=None, name=\"deconv1\")\n",
      "\n",
      "    fuse1 = tf.add(deconv1, net['pool4'], name=\"fuse1\")\n",
      "    \n",
      "    # deconv2 + net['pool3']: x16 -> x8\n",
      "    s = 2\n",
      "    k = 2*s\n",
      "    in_channel = out_channel\n",
      "    out_channel = net['pool3'].get_shape()[3].value\n",
      "    out_shape = tf.shape(net['pool3'])\n",
      "\n",
      "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
      "    deconv2_w = tf.get_variable(initializer=init,name=\"deconv2_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[out_channel])\n",
      "    deconv2_b = tf.get_variable(initializer=init,name=\"deconv2_b\")\n",
      "\n",
      "    deconv2 = tf.nn.conv2d_transpose(fuse1, deconv2_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
      "    deconv2 = tf.nn.bias_add(deconv2, deconv2_b, data_format=None, name=\"deconv2\")\n",
      "\n",
      "    fuse2 = tf.add(deconv2,net['pool3'],name=\"fuse2\")\n",
      "\n",
      "    # deconv3: x8 -> image_size\n",
      "    s = 8\n",
      "    k = 2*s\n",
      "    in_channel = out_channel\n",
      "    out_channel = FLAGS.num_classes\n",
      "    out_shape = tf.pack([tf.shape(processed_image)[0],tf.shape(processed_image)[1],tf.shape(processed_image)[2],out_channel])\n",
      "            \n",
      "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
      "    deconv3_w = tf.get_variable(initializer=init,name=\"deconv3_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[out_channel])\n",
      "    deconv3_b = tf.get_variable(initializer=init,name=\"deconv3_b\")\n",
      "\n",
      "    deconv3 = tf.nn.conv2d_transpose(fuse2, deconv3_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
      "    deconv3 = tf.nn.bias_add(deconv3, deconv3_b, data_format=None, name=\"deconv3\")\n",
      "\n",
      "    # per-pixel prediction\n",
      "    annotations_pred = tf.argmax(deconv3, dimension=3, name=None)\n",
      "    annotations_pred = tf.expand_dims(annotations_pred, dim=3, name=\"prediction\")\n",
      "\n",
      "# log images, annotations, annotations_pred\n",
      "tf.image_summary(\"images\", images, max_images=2, collections=None, name=None)\n",
      "tf.image_summary(\"annotations\", tf.cast(annotations,tf.uint8), max_images=2, collections=None, name=None)\n",
      "tf.image_summary(\"annotations_pred\", tf.cast(annotations_pred,tf.uint8), max_images=2, collections=None, name=None)\n",
      "\n",
      "# construct the loss\n",
      "loss = tf.nn.sparse_softmax_cross_entropy_with_logits(deconv3, tf.squeeze(annotations, squeeze_dims=[3]))\n",
      "loss = tf.reduce_mean(loss, reduction_indices=None, keep_dims=False, name=\"pixel-wise_cross-entropy_loss\")\n",
      "\n",
      "# log the loss\n",
      "tf.scalar_summary(\"pixel-wise_cross-entropy_loss\", loss, collections=None, name=None)\n",
      "\n",
      "# log all the trainable variables (fix misspelled local: trainabel_vars -> trainable_vars)\n",
      "trainable_vars = tf.trainable_variables()\n",
      "if FLAGS.debug:\n",
      "    for var in trainable_vars:\n",
      "        tf.histogram_summary(var.op.name+\"/values\", var, collections=None, name=None)\n",
      "        tf.add_to_collection(\"sum(t ** 2) / 2 of all trainable_vars\", tf.nn.l2_loss(var))\n",
      "        \n",
      "# construct the optimizer (Adam) and its gradient/apply ops over the loss\n",
      "optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n",
      "gradients = optimizer.compute_gradients(loss,trainable_vars)\n",
      "if FLAGS.debug:\n",
      "    # log the gradients\n",
      "    for grad, var in gradients:\n",
      "        tf.histogram_summary(var.op.name+\"/gradients\", grad, collections=None, name=None)\n",
      "train_op = optimizer.apply_gradients(gradients)\n",
      "\n",
      "# initialize the variables\n",
      "print(\"\\nInitializing the variables ...\\n\")\n",
      "sess = tf.InteractiveSession()\n",
      "tf.initialize_all_variables().run()\n",
      "\n",
      "# set up the saver\n",
      "print(\"\\nSetting up the Saver ...\\n\")\n",
      "saver = tf.train.Saver()\n",
      "if FLAGS.load:\n",
      "    print(\"\\nLoading pretrain parameters of the whole network ...\\n\")\n",
      "    saver.restore(sess, os.path.join(FLAGS.full_model,FLAGS.full_model_file))\n",
      "\n",
      "# set the summary writer\n",
      "print(\"\\nSetting the summary writers ...\\n\")\n",
      "summary_op = tf.merge_all_summaries()\n",
      "if not os.path.exists(FLAGS.logs_dir):\n",
      "    os.system(\"mkdir \"+FLAGS.logs_dir)\n",
      "if FLAGS.mode == 'train':\n",
      "    if os.path.exists(FLAGS.logs_dir+\"/train\"):\n",
      "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/train\")\n",
      "    if os.path.exists(FLAGS.logs_dir+\"/valid\"):\n",
      "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/valid\")\n",
      "    train_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/train\",sess.graph)\n",
      "    valid_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/valid\")\n",
      "elif FLAGS.mode == 'valid':\n",
      "    if os.path.exists(FLAGS.logs_dir+\"/complete_valid\"):\n",
      "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/complete_valid\")\n",
      "    valid_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/complete_valid\")    \n",
      "\n",
      "# read data_records from *.pickle\n",
      "print(\"\\nReading in and reprocessing all images ...\\n\")\n",
      "# check if FLAGS.data_dir folder exist\n",
      "if not os.path.exists(FLAGS.data_dir):\n",
      "    os.makedirs(FLAGS.data_dir)\n",
      "# check if the *.pickle file exist\n",
      "pickle_file = os.path.join(FLAGS.data_dir,FLAGS.pickle_name)\n",
      "if not os.path.exists(pickle_file):\n",
      "    # check if the *.zip exist\n",
      "    zip_file = os.path.join(FLAGS.data_dir,FLAGS.data_url.split('/')[-1])\n",
      "    if not os.path.exists(zip_file):\n",
      "        # download the *.zip\n",
      "        print(\"downloading \"+zip_file+\" ..\")\n",
      "        os.system(\"wget \"+FLAGS.data_url+\" -P \"+FLAGS.data_dir)\n",
      "        print(\"download finished!\")\n",
      "        # unzip the file\n",
      "        print(\"unzipping \"+zip_file+\" ..\")\n",
      "        os.system(\"unzip \"+zip_file+\" -d \"+FLAGS.data_dir)\n",
      "        print(\"unzipping finished!\")\n",
      "    # pack data into *.pickle\n",
      "    source_datadir =  zip_file.split('.')[0]\n",
      "    if not os.path.exists(source_datadir):\n",
      "        print(\"Error: source_datadir not found!!!\")\n",
      "        exit()\n",
      "    else:\n",
      "        data_types = ['training','validation']\n",
      "        data_list = {}\n",
      "        for data_type in data_types:\n",
      "            image_list = []\n",
      "            data_list[data_type] = []\n",
      "            # find all images\n",
      "            image_names = os.path.join(source_datadir,\"images\",data_type,'*.jpg')\n",
      "            print(\"\\nimage_names: %s\\n\"%(image_names))\n",
      "            image_list.extend(glob.glob(image_names))\n",
      "            if not image_list:\n",
      "                print(\"Error: no images found for \"+data_type+\"!!!\")\n",
      "                exit()\n",
      "            else:\n",
      "                # find corresponding annotations\n",
      "                for i in image_list:\n",
      "                    image_name = (i.split('/')[-1]).split('.')[0]\n",
      "                    annotation_name = os.path.join(source_datadir,\"annotations\",data_type,image_name+\".png\")\n",
      "                    if os.path.exists(annotation_name):\n",
      "                        # record this data tuple\n",
      "                        record = {'image':i,'annotation':annotation_name,'filename':image_name}\n",
      "                        data_list[data_type].append(record)\n",
      "            # shuffle all tuples\n",
      "            random.shuffle(data_list[data_type])\n",
      "            print(\"Number of %s tuples: %d\"%(data_type,len(data_list[data_type])))\n",
      "    print(\"Packing data into \"+pickle_file+\" ...\")\n",
      "    with open(pickle_file,'wb') as f:\n",
      "        pickle.dump(data_list,f,pickle.HIGHEST_PROTOCOL)\n",
      "    print(\"pickle finished!!!\")\n",
      "# load data_records from *.pickle\n",
      "with open(pickle_file,'rb') as f:\n",
      "    pickle_records = pickle.load(f)\n",
      "    train_records = pickle_records['training']\n",
      "    valid_records = pickle_records['validation']\n",
      "    del pickle_records\n",
      "    \n",
      "# initialize the data reader\n",
      "print(\"Initializing the data reader...\")\n",
      "reader_options = {'resize':True,'resize_size':FLAGS.image_size}\n",
      "if FLAGS.mode == 'train':\n",
      "    train_reader = dataset.BatchDatset(train_records[:10],reader_options)\n",
      "valid_reader = dataset.BatchDatset(valid_records[:10],reader_options)\n",
      "\n",
      "# check if FLAGS.full_model exist\n",
      "if not os.path.exists(FLAGS.full_model):\n",
      "    os.makedirs(FLAGS.full_model)\n",
      "\n",
      "# check variables' names (the order matters: saver_KITTI below maps checkpoint\n",
      "# names to tf.trainable_variables() by index)\n",
      "print(\"\\n-------------all variables' names:-----------\\n\")\n",
      "for idx, variable in enumerate(tf.trainable_variables()):\n",
      "    print(\"%d-th: %s\"%(idx,variable.op.name))\n",
      "\n",
      "# define saver for KITTI\n",
      "all_vars = tf.trainable_variables()\n",
      "saver_KITTI = tf.train.Saver({\"semantic_seg/conv1_1_w\":all_vars[0],\"semantic_seg/conv1_1_b\":all_vars[1],\"semantic_seg/conv1_2_w\":all_vars[2],\"semantic_seg/conv1_2_b\":all_vars[3],\n",
      "                              \"semantic_seg/conv2_1_w\":all_vars[4],\"semantic_seg/conv2_1_b\":all_vars[5],\"semantic_seg/conv2_2_w\":all_vars[6],\"semantic_seg/conv2_2_b\":all_vars[7],\n",
      "                              \"semantic_seg/conv3_1_w\":all_vars[8],\"semantic_seg/conv3_1_b\":all_vars[9],\"semantic_seg/conv3_2_w\":all_vars[10],\"semantic_seg/conv3_2_b\":all_vars[11],\"semantic_seg/conv3_3_w\":all_vars[12],\"semantic_seg/conv3_3_b\":all_vars[13],\"semantic_seg/conv3_4_w\":all_vars[14],\"semantic_seg/conv3_4_b\":all_vars[15],\n",
      "                              \"semantic_seg/conv4_1_w\":all_vars[16],\"semantic_seg/conv4_1_b\":all_vars[17],\"semantic_seg/conv4_2_w\":all_vars[18],\"semantic_seg/conv4_2_b\":all_vars[19],\"semantic_seg/conv4_3_w\":all_vars[20],\"semantic_seg/conv4_3_b\":all_vars[21],\"semantic_seg/conv4_4_w\":all_vars[22],\"semantic_seg/conv4_4_b\":all_vars[23],\n",
      "                              \"semantic_seg/conv5_1_w\":all_vars[24],\"semantic_seg/conv5_1_b\":all_vars[25],\"semantic_seg/conv5_2_w\":all_vars[26],\"semantic_seg/conv5_2_b\":all_vars[27],\"semantic_seg/conv5_3_w\":all_vars[28],\"semantic_seg/conv5_3_b\":all_vars[29],\n",
      "                              \"semantic_seg/fcn6_w\":all_vars[30],\"semantic_seg/fcn6_b\":all_vars[31],\"semantic_seg/fcn7_w\":all_vars[32],\"semantic_seg/fcn7_b\":all_vars[33],\n",
      "                              \"semantic_seg/deconv2_w\":all_vars[38],\"semantic_seg/deconv2_b\":all_vars[39]})\n",
      "\n",
      "# start training/ validation\n",
      "print(\"\\nStarting training/ validation...\\n\")\n",
      "if FLAGS.mode == 'train':\n",
      "    \n",
      "    # extrac the variables for KITTI-finetuning\n",
      "    snapshot_name = os.path.join(FLAGS.full_model,\"100000_model_KITTI.ckpt\")\n",
      "    saver_KITTI.save(sess,snapshot_name)\n",
      "            \n",
      "elif FLAGS.mode == 'valid':\n",
      "    # quantitative results\n",
      "    valid_images,valid_annotations=valid_reader.get_records()\n",
      "    feed_dict = {images:valid_images[:20],annotations:valid_annotations[:20],dropout_prob:1.0}\n",
      "    valid_loss,valid_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "    valid_writer.add_summary(valid_summary,FLAGS.max_iters)\n",
      "    print(\"==============================\")\n",
      "    print(\"Step: %d, valid_loss: %f\"%(FLAGS.max_iters,valid_loss))\n",
      "    print(\"==============================\")\n",
      "    # qualitative results\n",
      "    valid_images,valid_annotations=valid_reader.get_random_batch(FLAGS.batch_size)\n",
      "    feed_dict = {images:valid_images,annotations:valid_annotations,dropout_prob:1.0}\n",
      "    annotations_pred_results = sess.run(annotations_pred,feed_dict=feed_dict)\n",
      "    \n",
      "    valid_annotations = np.squeeze(valid_annotations,axis=3)\n",
      "    annotations_pred_results = np.squeeze(annotations_pred_results,axis=3)\n",
      "    \n",
      "    for n in xrange(FLAGS.batch_size):\n",
      "        print(\"Saving %d-th valid tuples for qualitative comparisons...\"%(n))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_image.png\",valid_images[n].astype(np.uint8))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_annotation.png\",valid_annotations[n].astype(np.uint8))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_prediction.png\",annotations_pred_results[n].astype(np.uint8))\n",
      "    print(\"saving finished!!!\")"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# prepare the *_gt.png for KITTI dataset\n",
      "\n",
      "import scipy.io \n",
      "import numpy as np \n",
      "import scipy.misc as misc\n",
      "import glob\n",
      "import pylab as pl\n",
      "import os\n",
      "\n",
      "'''\n",
      "Label number / Object class / RGB\n",
      "0 - NOT GROUND TRUTHED - 0 0 0\n",
      "1 - building - 153 0 0\n",
      "2 - sky - 0 51 102\n",
      "3 - road - 160 160 160\n",
      "4 - vegetation - 0 102 0\n",
      "5 - sidewalk - 255 228 196\n",
      "6 - car - 255 200 50\n",
      "7 - pedestrian - 255 153 255\n",
      "8 - cyclist - 204 153 255\n",
      "9 - signage - 130 255 255\n",
      "10 - fence - 193 120 87\n",
      "'''\n",
      "\n",
      "dataset_types = ['trn','test']\n",
      "dataset_idices = {'trn':['0009','0010','0011','0019'],\n",
      "                  'test':['0000','0004','0005','0013']}\n",
      "\n",
      "for dataset_type in dataset_types:\n",
      "    for dataset_idx in dataset_idices[dataset_type]:\n",
      "        \n",
      "        # find all images of a sequence\n",
      "        image_names = \"KITTI_public/image_02/\"+str(dataset_type)+\"/\"+str(dataset_idx)+\"/*.mat\"\n",
      "        image_list = []\n",
      "        image_list.extend(glob.glob(image_names))\n",
      "\n",
      "        # sort images by time-stamp\n",
      "        image_list.sort()\n",
      "\n",
      "        # for each image annotation\n",
      "        for k in xrange(len(image_list)):\n",
      "            # (height=375, width=1242)\n",
      "            # 0019 (374,1238)\n",
      "            image_train = scipy.io.loadmat(image_list[k]) \n",
      "            image_train = np.array(image_train['truth'])\n",
      "            height = image_train.shape[0]\n",
      "            width = image_train.shape[1]\n",
      "\n",
      "            # data info\n",
      "            if k == 0:\n",
      "                print(\"dataset name: %s\"%(image_names))\n",
      "                print(\"image number: %d\"%(len(image_list)))\n",
      "                print(\"image_size(height,width): (%d,%d)\"%(height,width))\n",
      "                # index range\n",
      "                min_idx = np.min(image_train)\n",
      "                max_idx = np.max(image_train)\n",
      "                print(\"min_idx: %d\"%(min_idx))\n",
      "                print(\"max_idx: %d\"%(max_idx))\n",
      "\n",
      "            # index -> RGB lookup table for the 11 KITTI labels\n",
      "            # (see the label/RGB legend at the top of this cell)\n",
      "            idx2rgb = np.array([(0,0,0),(153,0,0),(0,51,102),(160,160,160),(0,102,0),\n",
      "                                (255,228,196),(255,200,50),(255,153,255),\n",
      "                                (204,153,255),(130,255,255),(193,120,87)],dtype=np.uint8)\n",
      "\n",
      "            # vectorized index->color mapping: one fancy-indexing lookup replaces\n",
      "            # the per-pixel double Python loop over (height, width); result is the\n",
      "            # same uint8 array of shape (height, width, 3)\n",
      "            image_train_color = idx2rgb[image_train]\n",
      "\n",
      "#             # Display the image\n",
      "#             pl.imshow(image_train_color)\n",
      "#             pl.xlabel(\"[%s-%s]: %s/%s\"%(dataset_type,dataset_idx,image_list[k].split('.')[0].split('/')[-1],image_list[-1].split('.')[0].split('/')[-1])) \n",
      "#             pl.pause(0.0000001)\n",
      "#             pl.draw()\n",
      "\n",
      "            # save the *_mask.png\n",
      "            misc.imsave(image_list[k].split('.')[0]+\"_mask.png\",image_train_color)\n",
      "            \n",
      "            # save the *_gt.png\n",
      "            misc.imsave(image_list[k].split('.')[0]+\"_gt.png\",image_train.astype(np.uint8))\n"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "dataset name: KITTI_public/image_02/trn/0009/*.mat\n",
        "image number: 41\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 10\n",
        "dataset name: KITTI_public/image_02/trn/0010/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 30\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 6\n",
        "dataset name: KITTI_public/image_02/trn/0011/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 38\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 9\n",
        "dataset name: KITTI_public/image_02/trn/0019/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 31\n",
        "image_size(height,width): (374,1238)\n",
        "min_idx: 0\n",
        "max_idx: 8\n",
        "dataset name: KITTI_public/image_02/test/0000/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 16\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 9\n",
        "dataset name: KITTI_public/image_02/test/0004/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 32\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 9\n",
        "dataset name: KITTI_public/image_02/test/0005/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 30\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 6\n",
        "dataset name: KITTI_public/image_02/test/0013/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image number: 34\n",
        "image_size(height,width): (375,1242)\n",
        "min_idx: 0\n",
        "max_idx: 9\n"
       ]
      }
     ],
     "prompt_number": 30
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# prepare the original rgb images from KITTI\n",
      "\n",
      "import scipy.io \n",
      "import numpy as np \n",
      "import scipy.misc as misc\n",
      "import glob\n",
      "import pylab as pl\n",
      "import os\n",
      "import sys\n",
      "import shutil\n",
      "\n",
      "dataset_types = ['trn','test']\n",
      "dataset_idices = {'trn':['0009','0010','0011','0019'],\n",
      "                  'test':['0000','0004','0005','0013']}\n",
      "# map the renumbered sequence ids back to the original KITTI drive ids\n",
      "fake2true = {'0009':'0036','0010':'0056','0011':'0059','0019':'0071',\n",
      "           '0000':'0005','0004':'0014','0005':'0015','0013':'0091'}\n",
      "\n",
      "for dataset_type in dataset_types:\n",
      "    for dataset_idx in dataset_idices[dataset_type]:\n",
      "        # find all annotation files (*.mat) of a sequence\n",
      "        image_names = \"KITTI_public/image_02/\"+str(dataset_type)+\"/\"+str(dataset_idx)+\"/*.mat\"\n",
      "        image_list = []\n",
      "        image_list.extend(glob.glob(image_names))\n",
      "\n",
      "        # log info\n",
      "        print(\"processing: %s\"%(image_names))\n",
      "\n",
      "        # sort images by time-stamp\n",
      "        image_list.sort()\n",
      "\n",
      "        # to find each original image from KITTI\n",
      "        for k in xrange(len(image_list)):\n",
      "            # glob pattern of its original image\n",
      "            image_name = \"KITTI/\"+fake2true[dataset_idx]+\"_\"+dataset_idx+\"/*/*_sync/image_02/data/*\"+image_list[k].split('.')[0].split('/')[-1]+\".png\"\n",
      "            \n",
      "            # find the full path\n",
      "            full_path = glob.glob(image_name)\n",
      "\n",
      "            # guard: warn and skip instead of crashing with IndexError when the\n",
      "            # original image cannot be found\n",
      "            if not full_path:\n",
      "                print(\"Warning: no original image found for %s\"%(image_name))\n",
      "                continue\n",
      "\n",
      "            # current path\n",
      "            target_path = image_list[k].split('.')[0]+\".png\"\n",
      "            \n",
      "            # copy the image to the current folder (shutil.copy instead of\n",
      "            # os.system(\"cp ...\"): portable and raises on failure)\n",
      "            shutil.copy(full_path[0], target_path)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "processing: KITTI_public/image_02/trn/0009/*.mat\n",
        "processing: KITTI_public/image_02/trn/0010/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processing: KITTI_public/image_02/trn/0011/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processing: KITTI_public/image_02/trn/0019/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processing: KITTI_public/image_02/test/0000/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processing: KITTI_public/image_02/test/0004/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processing: KITTI_public/image_02/test/0005/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processing: KITTI_public/image_02/test/0013/*.mat"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n"
       ]
      }
     ],
     "prompt_number": 31
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# pack the KITTI dataset into \"Data_zoo/KITTI/KITTI.pickle\"\n",
      "\n",
      "import os\n",
      "import random\n",
      "from six.moves import cPickle as pickle\n",
      "import glob\n",
      "\n",
      "data_types = ['trn','test']\n",
      "data_list = {}\n",
      "for data_type in data_types:\n",
      "    image_list = []\n",
      "    data_list[data_type] = []\n",
      "    # find all images\n",
      "    image_names = \"KITTI_public/image_02/\"+data_type+\"/00*/*.png\"\n",
      "    print(\"\\nimage_names: %s\\n\"%(image_names))\n",
      "    image_list.extend(glob.glob(image_names))\n",
      "    if not image_list:\n",
      "        print(\"Error: no images found for \"+data_type+\"!!!\")\n",
      "        exit()\n",
      "    else:\n",
      "        # find corresponding annotations (ground truth lives next to the\n",
      "        # image as <name>_gt.png); keep only images that have one\n",
      "        for i in image_list:\n",
      "            annotation_name = i.split('.')[0]+\"_gt.png\"\n",
      "            if os.path.exists(annotation_name):\n",
      "                # record this data tuple\n",
      "                record = {'image':i,'annotation':annotation_name}\n",
      "                data_list[data_type].append(record)\n",
      "    # shuffle all tuples\n",
      "    random.shuffle(data_list[data_type])\n",
      "    print(\"Number of %s tuples: %d\"%(data_type,len(data_list[data_type])))\n",
      "\n",
      "pickle_file = \"Data_zoo/KITTI/KITTI.pickle\"\n",
      "# make sure the target directory exists before writing -- open() would\n",
      "# otherwise fail with IOError on a fresh checkout\n",
      "if not os.path.exists(os.path.dirname(pickle_file)):\n",
      "    os.makedirs(os.path.dirname(pickle_file))\n",
      "print(\"Packing data into \"+pickle_file+\" ...\")\n",
      "with open(pickle_file,'wb') as f:\n",
      "    pickle.dump(data_list,f,pickle.HIGHEST_PROTOCOL)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "image_names: KITTI_public/image_02/trn/00*/*.png\n",
        "\n",
        "Number of trn tuples: 140\n",
        "\n",
        "image_names: KITTI_public/image_02/test/00*/*.png\n",
        "\n",
        "Number of test tuples: 112\n",
        "Packing data into Data_zoo/KITTI/KITTI.pickle ...\n"
       ]
      }
     ],
     "prompt_number": 32
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# fine-tuning on KITTI\n",
      "\n",
      "import tensorflow as tf\n",
      "import numpy as np\n",
      "import os\n",
      "import scipy.io\n",
      "import glob\n",
      "import random\n",
      "import BatchDatsetReader as dataset\n",
      "import scipy.misc as misc\n",
      "from six.moves import cPickle as pickle\n",
      "\n",
      "# reset the graph\n",
      "tf.reset_default_graph()\n",
      "\n",
      "# reset tf.flags.FLAGS so this cell can be re-run without duplicate-flag\n",
      "# errors (relies on TF-0.x internals; a duplicate reset_default_graph()\n",
      "# call was removed here)\n",
      "import argparse\n",
      "tf.flags.FLAGS = tf.python.platform.flags._FlagValues()\n",
      "tf.flags._global_parser = argparse.ArgumentParser()\n",
      "\n",
      "# set tf.flags.FLAGS\n",
      "FLAGS = tf.flags.FLAGS\n",
      "tf.flags.DEFINE_integer(\"batch_size\",\"1\",\"batch size for training\")\n",
      "tf.flags.DEFINE_string(\"logs_dir\",\"logs/KITTI\",\"path to logs directory\")\n",
      "tf.flags.DEFINE_string(\"data_dir\",\"Data_zoo/KITTI/\",\"path to dataset\")\n",
      "tf.flags.DEFINE_string(\"pickle_name\",\"KITTI.pickle\",\"pickle file of the data\")\n",
      "tf.flags.DEFINE_float(\"learning_rate\",\"1e-4\",\"learning rate for the optimizer\")\n",
      "tf.flags.DEFINE_string(\"model_dir\",\"Model_zoo/\",\"path to vgg model mat\")\n",
      "tf.flags.DEFINE_bool(\"debug\",\"True\",\"Debug model: True/False\")\n",
      "tf.flags.DEFINE_string(\"mode\",\"train\",\"Mode: train/ valid\")\n",
      "tf.flags.DEFINE_integer(\"max_iters\",\"100001\",\"max training iterations of batches\")\n",
      "tf.flags.DEFINE_integer(\"num_classes\",\"11\",\"number of segmentation classes for KITTI\")\n",
      "tf.flags.DEFINE_string(\"model_weights\",\"http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat\",\"pretrained weights of the CNN in use\")\n",
      "tf.flags.DEFINE_string(\"full_model\",\"full_model/\",\"trained parameters of the whole network\")\n",
      "tf.flags.DEFINE_string(\"full_model_file\",\"100000_model.ckpt\",\"pretrained parameters of the whole network\")\n",
      "tf.flags.DEFINE_bool(\"load\",\"True\",\"load in pretrained parameters\")\n",
      "tf.flags.DEFINE_string(\"name\",\"KITTI\",\"dataset name\")\n",
      "\n",
      "# check if the CNN weights folder exist\n",
      "if not os.path.exists(FLAGS.model_dir):\n",
      "    os.makedirs(FLAGS.model_dir)\n",
      "\n",
      "# check if the CNN weights file exist; download it with wget when missing\n",
      "weights_file = os.path.join(FLAGS.model_dir,FLAGS.model_weights.split('/')[-1])\n",
      "if not os.path.exists(weights_file):\n",
      "    print(\"\\ndownloading \"+weights_file+\" ...\")\n",
      "    os.system(\"wget \"+FLAGS.model_weights+\" -P \"+FLAGS.model_dir)\n",
      "    print(\"download finished!\\n\")\n",
      "else:\n",
      "    print(\"\\n\"+weights_file+\" has already been downloaded.\\n\")\n",
      "\n",
      "# load the weights file (matconvnet VGG-19 .mat)\n",
      "print(\"\\nloading pretrained weights from: \"+weights_file)\n",
      "pretrain_weights = scipy.io.loadmat(weights_file)\n",
      "print(\"loading finished!\\n\")\n",
      "\n",
      "# the mean RGB\n",
      "mean = pretrain_weights['normalization'][0][0][0] # shape(224,224,3)\n",
      "mean_pixel = np.mean(mean,axis=(0,1)) # average on (height,width) to compute the mean RGB\n",
      "\n",
      "# the weights and biases\n",
      "weights_biases = np.squeeze(pretrain_weights['layers'])\n",
      "\n",
      "# network input data\n",
      "dropout_prob = tf.placeholder(tf.float32,name=\"dropout_probability\")\n",
      "images = tf.placeholder(tf.float32,shape=[None,None,None,3],name=\"input_images\")\n",
      "annotations = tf.placeholder(tf.int32,shape=[None,None,None,1],name=\"input_annotations\")\n",
      "\n",
      "# subtract the mean image\n",
      "processed_image = images - mean_pixel\n",
      "\n",
      "# construct the semantic_seg network\n",
      "with tf.variable_scope(\"semantic_seg\"):\n",
      "    # convs of the vgg net (VGG-19 up to conv5_3; the tail is built below)\n",
      "    net = {}\n",
      "    layers = [\n",
      "        'conv1_1','relu1_1','conv1_2','relu1_2','pool1',\n",
      "        'conv2_1','relu2_1','conv2_2','relu2_2','pool2',\n",
      "        'conv3_1','relu3_1','conv3_2','relu3_2','conv3_3','relu3_3','conv3_4','relu3_4','pool3',\n",
      "        'conv4_1','relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','conv4_4','relu4_4','pool4',\n",
      "        'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3' #,'relu5_3','conv5_4','relu5_4','pool5'\n",
      "    ]\n",
      "    current = processed_image\n",
      "\n",
      "    # sanity check\n",
      "    print(\"processed_image: {}\".format(processed_image.get_shape()))\n",
      "\n",
      "    for i,name in enumerate(layers):\n",
      "        type = name[:4]\n",
      "        if type == 'conv':\n",
      "            # matconvnet weights: (width, height, in_channels, out_channels)\n",
      "            # tensorflow weights: (height, width, in_channels, out_channels)\n",
      "            weights, biases = weights_biases[i][0][0][0][0]\n",
      "\n",
      "            weights = np.transpose(weights,(1,0,2,3))\n",
      "            biases = np.squeeze(biases)\n",
      "\n",
      "            init = tf.constant_initializer(weights,dtype=tf.float32)\n",
      "            weights = tf.get_variable(initializer=init,shape=weights.shape,name=name+\"_w\")\n",
      "\n",
      "            init = tf.constant_initializer(biases,dtype=tf.float32)\n",
      "            biases = tf.get_variable(initializer=init,shape=biases.shape,name=name+\"_b\")\n",
      "\n",
      "            current = tf.nn.conv2d(current,weights,strides=[1,1,1,1],padding=\"SAME\")\n",
      "            current = tf.nn.bias_add(current,biases,name=name)\n",
      "\n",
      "            # sanity check\n",
      "            print(\"{}: {}\".format(name,current.get_shape()))\n",
      "        elif type == 'relu':\n",
      "            current = tf.nn.relu(current,name=name)\n",
      "            if FLAGS.debug:\n",
      "                tf.histogram_summary(current.op.name+\"/activation\",current)\n",
      "                tf.scalar_summary(current.op.name+\"/sparsity\",tf.nn.zero_fraction(current))\n",
      "            # sanity check\n",
      "            print(\"{}: {}\".format(name,current.get_shape()))\n",
      "        elif type == 'pool':\n",
      "            # NOTE(review): 'pool5' never occurs in `layers`, so this branch\n",
      "            # always takes avg_pool; pool5 itself is added as max_pool below\n",
      "            if name == 'pool5':\n",
      "                current = tf.nn.max_pool(current,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=name)\n",
      "            else:\n",
      "                current = tf.nn.avg_pool(current,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=name)\n",
      "            # sanity check\n",
      "            print(\"{}: {}\".format(name,current.get_shape()))\n",
      "        net[name] = current\n",
      "\n",
      "    # fix: name the op \"pool5\" explicitly -- `name` is left over from the\n",
      "    # loop above and would mislabel this op as \"conv5_3\"\n",
      "    net['pool5'] = tf.nn.max_pool(net['conv5_3'],ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=\"pool5\")\n",
      "    # sanity check\n",
      "    print(\"pool5: {}\".format(net['pool5'].get_shape()))\n",
      "    \n",
      "    # fcn6: 7x7 conv standing in for VGG fc6 (fully-convolutional)\n",
      "    init = tf.truncated_normal(shape=[7,7,512,4096],stddev=0.02)\n",
      "    fcn6_w = tf.get_variable(initializer=init,name=\"fcn6_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[4096])\n",
      "    fcn6_b = tf.get_variable(initializer=init,name=\"fcn6_b\")\n",
      "\n",
      "    fcn6 = tf.nn.conv2d(net['pool5'],fcn6_w,strides=[1,1,1,1],padding=\"SAME\")\n",
      "    fcn6 = tf.nn.bias_add(fcn6,fcn6_b,name=\"fcn6\")\n",
      "\n",
      "    relu6 = tf.nn.relu(fcn6,name=\"relu6\")\n",
      "    if FLAGS.debug:\n",
      "        tf.histogram_summary(\"relu6/activation\", relu6, collections=None, name=None)\n",
      "        tf.scalar_summary(\"relu6/sparsity\", tf.nn.zero_fraction(relu6), collections=None, name=None)\n",
      "    dropout6 = tf.nn.dropout(relu6, keep_prob=dropout_prob, noise_shape=None, seed=None, name=\"dropout6\")\n",
      "    # sanity check\n",
      "    print(\"dropout6: {}\".format(dropout6.get_shape()))\n",
      "\n",
      "    # fcn7: 1x1 conv standing in for VGG fc7\n",
      "    init = tf.truncated_normal(shape=[1,1,4096,4096],stddev=0.02)\n",
      "    fcn7_w = tf.get_variable(initializer=init,name=\"fcn7_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[4096])\n",
      "    fcn7_b = tf.get_variable(initializer=init,name=\"fcn7_b\")\n",
      "\n",
      "    fcn7 = tf.nn.conv2d(dropout6, fcn7_w, strides=[1,1,1,1], padding=\"SAME\", use_cudnn_on_gpu=None, data_format=None, name=None)\n",
      "    fcn7 = tf.nn.bias_add(fcn7, fcn7_b, data_format=None, name=\"fcn7\")\n",
      "\n",
      "    relu7 = tf.nn.relu(fcn7,name=\"relu7\")\n",
      "    if FLAGS.debug:\n",
      "        tf.histogram_summary(\"relu7/activation\", relu7, collections=None, name=None)\n",
      "        tf.scalar_summary(\"relu7/sparsity\", tf.nn.zero_fraction(relu7), collections=None, name=None)\n",
      "    dropout7 = tf.nn.dropout(relu7, keep_prob=dropout_prob, noise_shape=None, seed=None, name=\"dropout7\")\n",
      "    # sanity check\n",
      "    print(\"dropout7: {}\".format(dropout7.get_shape()))\n",
      "\n",
      "    # fcn8: 1x1 conv producing one score map per class\n",
      "    init = tf.truncated_normal(shape=[1,1,4096,FLAGS.num_classes],stddev=0.02)\n",
      "    fcn8_w = tf.get_variable(initializer=init,name=\"fcn8_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[FLAGS.num_classes])\n",
      "    fcn8_b = tf.get_variable(initializer=init,name=\"fcn8_b\")\n",
      "\n",
      "    fcn8 = tf.nn.conv2d(dropout7, fcn8_w, strides=[1,1,1,1], padding=\"SAME\", use_cudnn_on_gpu=None, data_format=None, name=None)\n",
      "    fcn8 = tf.nn.bias_add(fcn8, fcn8_b, data_format=None, name=\"fcn8\")\n",
      "    # sanity check\n",
      "    print(\"fcn8: {}\".format(fcn8.get_shape()))\n",
      "\n",
      "    # deconv1 + net['pool4']: x32 -> x16\n",
      "    # (FCN-style skip connection: upsample x2, then fuse with pool4)\n",
      "    s = 2\n",
      "    k = 2*s\n",
      "    in_channel = FLAGS.num_classes\n",
      "    out_channel = net['pool4'].get_shape()[3].value\n",
      "    out_shape = tf.shape(net['pool4'])\n",
      "\n",
      "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
      "    deconv1_w = tf.get_variable(initializer=init,name=\"deconv1_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[out_channel])\n",
      "    deconv1_b = tf.get_variable(initializer=init,name=\"deconv1_b\")\n",
      "\n",
      "    # sanity check\n",
      "    print(\"deconv1 output_shape: {}\".format(net['pool4'].get_shape()))\n",
      "\n",
      "    deconv1 = tf.nn.conv2d_transpose(fcn8, deconv1_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
      "    deconv1 = tf.nn.bias_add(deconv1, deconv1_b, data_format=None, name=\"deconv1\")\n",
      "\n",
      "    fuse1 = tf.add(deconv1, net['pool4'], name=\"fuse1\")\n",
      "    \n",
      "    # deconv2 + net['pool3']: x16 -> x8\n",
      "    # (second skip connection: upsample x2, then fuse with pool3)\n",
      "    s = 2\n",
      "    k = 2*s\n",
      "    in_channel = out_channel\n",
      "    out_channel = net['pool3'].get_shape()[3].value\n",
      "    out_shape = tf.shape(net['pool3'])\n",
      "\n",
      "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
      "    deconv2_w = tf.get_variable(initializer=init,name=\"deconv2_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[out_channel])\n",
      "    deconv2_b = tf.get_variable(initializer=init,name=\"deconv2_b\")\n",
      "\n",
      "    deconv2 = tf.nn.conv2d_transpose(fuse1, deconv2_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
      "    deconv2 = tf.nn.bias_add(deconv2, deconv2_b, data_format=None, name=\"deconv2\")\n",
      "\n",
      "    fuse2 = tf.add(deconv2,net['pool3'],name=\"fuse2\")\n",
      "\n",
      "    # deconv3: x8 -> image_size\n",
      "    # (final x8 upsampling back to the input resolution)\n",
      "    s = 8\n",
      "    k = 2*s\n",
      "    in_channel = out_channel\n",
      "    out_channel = FLAGS.num_classes\n",
      "    out_shape = tf.pack([tf.shape(processed_image)[0],tf.shape(processed_image)[1],tf.shape(processed_image)[2],out_channel])\n",
      "            \n",
      "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
      "    deconv3_w = tf.get_variable(initializer=init,name=\"deconv3_w\")\n",
      "\n",
      "    init = tf.constant(0.0,shape=[out_channel])\n",
      "    deconv3_b = tf.get_variable(initializer=init,name=\"deconv3_b\")\n",
      "\n",
      "    deconv3 = tf.nn.conv2d_transpose(fuse2, deconv3_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
      "    deconv3 = tf.nn.bias_add(deconv3, deconv3_b, data_format=None, name=\"deconv3\")\n",
      "\n",
      "    # per-pixel prediction: argmax over the class axis, then a trailing\n",
      "    # channel axis is re-added so it matches the annotations layout\n",
      "    annotations_pred = tf.argmax(deconv3, dimension=3, name=None)\n",
      "    annotations_pred = tf.expand_dims(annotations_pred, dim=3, name=\"prediction\")\n",
      "\n",
      "# log images, annotations, annotations_pred\n",
      "tf.image_summary(\"images\", images, max_images=1, collections=None, name=None)\n",
      "tf.image_summary(\"annotations\", tf.cast(annotations,tf.uint8), max_images=1, collections=None, name=None)\n",
      "tf.image_summary(\"annotations_pred\", tf.cast(annotations_pred,tf.uint8), max_images=1, collections=None, name=None)\n",
      "\n",
      "# construct the loss: per-pixel softmax cross-entropy, averaged over pixels\n",
      "loss = tf.nn.sparse_softmax_cross_entropy_with_logits(deconv3, tf.squeeze(annotations, squeeze_dims=[3]))\n",
      "loss = tf.reduce_mean(loss, reduction_indices=None, keep_dims=False, name=\"pixel-wise_cross-entropy_loss\")\n",
      "\n",
      "# log the loss\n",
      "tf.scalar_summary(\"pixel-wise_cross-entropy_loss\", loss, collections=None, name=None)\n",
      "\n",
      "# log all the trainable variables (renamed from `trainabel_vars` -- typo)\n",
      "trainable_vars = tf.trainable_variables()\n",
      "if FLAGS.debug:\n",
      "    for var in trainable_vars:\n",
      "        tf.histogram_summary(var.op.name+\"/values\", var, collections=None, name=None)\n",
      "        tf.add_to_collection(\"sum(t ** 2) / 2 of all trainable_vars\", tf.nn.l2_loss(var))\n",
      "\n",
      "# construct the optimizer\n",
      "optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n",
      "gradients = optimizer.compute_gradients(loss,trainable_vars)\n",
      "if FLAGS.debug:\n",
      "    # log the gradients\n",
      "    for grad, var in gradients:\n",
      "        tf.histogram_summary(var.op.name+\"/gradients\", grad, collections=None, name=None)\n",
      "train_op = optimizer.apply_gradients(gradients)\n",
      "\n",
      "# initialize the variables\n",
      "print(\"\\nInitializing the variables ...\\n\")\n",
      "sess = tf.InteractiveSession()\n",
      "tf.initialize_all_variables().run()\n",
      "\n",
      "# set up the saver; saver_KITTI restores only the layers whose shapes do\n",
      "# not depend on FLAGS.num_classes (fcn8/deconv1/deconv3 are re-trained)\n",
      "print(\"\\nSetting up the Saver ...\\n\")\n",
      "saver = tf.train.Saver()\n",
      "all_vars = tf.trainable_variables()\n",
      "saver_KITTI = tf.train.Saver({\"semantic_seg/conv1_1_w\":all_vars[0],\"semantic_seg/conv1_1_b\":all_vars[1],\"semantic_seg/conv1_2_w\":all_vars[2],\"semantic_seg/conv1_2_b\":all_vars[3],\n",
      "                              \"semantic_seg/conv2_1_w\":all_vars[4],\"semantic_seg/conv2_1_b\":all_vars[5],\"semantic_seg/conv2_2_w\":all_vars[6],\"semantic_seg/conv2_2_b\":all_vars[7],\n",
      "                              \"semantic_seg/conv3_1_w\":all_vars[8],\"semantic_seg/conv3_1_b\":all_vars[9],\"semantic_seg/conv3_2_w\":all_vars[10],\"semantic_seg/conv3_2_b\":all_vars[11],\"semantic_seg/conv3_3_w\":all_vars[12],\"semantic_seg/conv3_3_b\":all_vars[13],\"semantic_seg/conv3_4_w\":all_vars[14],\"semantic_seg/conv3_4_b\":all_vars[15],\n",
      "                              \"semantic_seg/conv4_1_w\":all_vars[16],\"semantic_seg/conv4_1_b\":all_vars[17],\"semantic_seg/conv4_2_w\":all_vars[18],\"semantic_seg/conv4_2_b\":all_vars[19],\"semantic_seg/conv4_3_w\":all_vars[20],\"semantic_seg/conv4_3_b\":all_vars[21],\"semantic_seg/conv4_4_w\":all_vars[22],\"semantic_seg/conv4_4_b\":all_vars[23],\n",
      "                              \"semantic_seg/conv5_1_w\":all_vars[24],\"semantic_seg/conv5_1_b\":all_vars[25],\"semantic_seg/conv5_2_w\":all_vars[26],\"semantic_seg/conv5_2_b\":all_vars[27],\"semantic_seg/conv5_3_w\":all_vars[28],\"semantic_seg/conv5_3_b\":all_vars[29],\n",
      "                              \"semantic_seg/fcn6_w\":all_vars[30],\"semantic_seg/fcn6_b\":all_vars[31],\"semantic_seg/fcn7_w\":all_vars[32],\"semantic_seg/fcn7_b\":all_vars[33],\n",
      "                              \"semantic_seg/deconv2_w\":all_vars[38],\"semantic_seg/deconv2_b\":all_vars[39]})\n",
      "if FLAGS.load:\n",
      "    print(\"\\nLoading pretrain parameters of the whole network ...\\n\")\n",
      "    saver_KITTI.restore(sess, os.path.join(FLAGS.full_model,FLAGS.full_model_file))\n",
      "\n",
      "# set the summary writer\n",
      "print(\"\\nSetting the summary writers ...\\n\")\n",
      "summary_op = tf.merge_all_summaries()\n",
      "if not os.path.exists(FLAGS.logs_dir):\n",
      "    os.makedirs(FLAGS.logs_dir)\n",
      "if FLAGS.mode == 'train':\n",
      "    if os.path.exists(FLAGS.logs_dir+\"/train\"):\n",
      "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/train\")\n",
      "    if os.path.exists(FLAGS.logs_dir+\"/valid\"):\n",
      "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/valid\")\n",
      "    train_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/train\",sess.graph)\n",
      "    valid_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/valid\")\n",
      "elif FLAGS.mode == 'valid':\n",
      "    if os.path.exists(FLAGS.logs_dir+\"/complete_valid\"):\n",
      "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/complete_valid\")\n",
      "    valid_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/complete_valid\")\n",
      "    \n",
      "# read data_records from *.pickle\n",
      "print(\"\\nReading in and reprocessing all images ...\\n\")\n",
      "# check if FLAGS.data_dir folder exist\n",
      "if not os.path.exists(FLAGS.data_dir):\n",
      "    os.makedirs(FLAGS.data_dir)\n",
      "# check if the *.pickle file exist\n",
      "pickle_file = os.path.join(FLAGS.data_dir,FLAGS.pickle_name)\n",
      "# load data_records from *.pickle (trusted local file; pickle.load on\n",
      "# untrusted data would be unsafe)\n",
      "with open(pickle_file,'rb') as f:\n",
      "    pickle_records = pickle.load(f)\n",
      "    train_records = pickle_records['trn']\n",
      "    valid_records = pickle_records['test']\n",
      "    del pickle_records\n",
      "\n",
      "# initialize the data reader\n",
      "print(\"Initializing the data reader...\")\n",
      "reader_options = {'different_size':True}\n",
      "if FLAGS.mode == 'train':\n",
      "    train_reader = dataset.BatchDatset(train_records,reader_options)\n",
      "valid_reader = dataset.BatchDatset(valid_records,reader_options)\n",
      "\n",
      "# check if FLAGS.full_model exist\n",
      "if not os.path.exists(os.path.join(FLAGS.full_model,FLAGS.name)):\n",
      "    os.makedirs(os.path.join(FLAGS.full_model,FLAGS.name))\n",
      "\n",
      "# start training/ validation\n",
      "print(\"\\nStarting training/ validation...\\n\")\n",
      "if FLAGS.mode == 'train':\n",
      "    for itr in xrange(FLAGS.max_iters):\n",
      "        # read next batch\n",
      "        train_images, train_annotations = train_reader.next_image(FLAGS.batch_size)\n",
      "        # dropout keep probability is 0.85 while training, 1.0 for eval below\n",
      "        feed_dict = {images:train_images,annotations:train_annotations,dropout_prob:0.85}\n",
      "        # training\n",
      "        sess.run(train_op,feed_dict=feed_dict)\n",
      "        # log training info every 10 iterations\n",
      "        if itr % 10 == 0:\n",
      "            train_loss, train_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "            train_writer.add_summary(train_summary,itr)\n",
      "            print(\"Step: %d, train_loss: %f\"%(itr,train_loss))\n",
      "        # log valid info every 100 iterations\n",
      "        if itr % 100 == 0:\n",
      "            valid_images, valid_annotations = valid_reader.next_image(FLAGS.batch_size)\n",
      "            feed_dict = {images:valid_images,annotations:valid_annotations,dropout_prob:1.0}\n",
      "            valid_loss, valid_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "            valid_writer.add_summary(valid_summary,itr)\n",
      "            print(\"==============================\")\n",
      "            print(\"Step: %d, valid_loss: %f\"%(itr,valid_loss))\n",
      "            print(\"==============================\")\n",
      "        # save snapshot every 500 iterations\n",
      "        if itr % 500 == 0:\n",
      "            snapshot_name = os.path.join(os.path.join(FLAGS.full_model,FLAGS.name),str(itr)+\"_model.ckpt\")\n",
      "            saver.save(sess,snapshot_name)\n",
      "elif FLAGS.mode == 'valid':\n",
      "    # quantitative results\n",
      "    valid_images,valid_annotations=valid_reader.next_image(FLAGS.batch_size)\n",
      "    # NOTE(review): only the first 20 items of the fetched batch are used;\n",
      "    # with batch_size=1 this slice is the whole batch -- confirm intent\n",
      "    feed_dict = {images:valid_images[:20],annotations:valid_annotations[:20],dropout_prob:1.0}\n",
      "    valid_loss,valid_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "    valid_writer.add_summary(valid_summary,FLAGS.max_iters)\n",
      "    print(\"==============================\")\n",
      "    print(\"Step: %d, valid_loss: %f\"%(FLAGS.max_iters,valid_loss))\n",
      "    print(\"==============================\")\n",
      "    # qualitative results\n",
      "    valid_images,valid_annotations=valid_reader.next_image(FLAGS.batch_size)\n",
      "    feed_dict = {images:valid_images,annotations:valid_annotations,dropout_prob:1.0}\n",
      "    annotations_pred_results = sess.run(annotations_pred,feed_dict=feed_dict)\n",
      "    \n",
      "    # drop the trailing channel axis before saving with imsave\n",
      "    valid_annotations = np.squeeze(valid_annotations,axis=3)\n",
      "    annotations_pred_results = np.squeeze(annotations_pred_results,axis=3)\n",
      "    \n",
      "    for n in xrange(FLAGS.batch_size):\n",
      "        print(\"Saving %d-th valid tuples for qualitative comparisons...\"%(n))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_image.png\",valid_images[n].astype(np.uint8))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_annotation.png\",valid_annotations[n].astype(np.uint8))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_prediction.png\",annotations_pred_results[n].astype(np.uint8))\n",
      "    print(\"saving finished!!!\")"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "Model_zoo/imagenet-vgg-verydeep-19.mat has already been downloaded.\n",
        "\n",
        "\n",
        "loading pretrained weights from: Model_zoo/imagenet-vgg-verydeep-19.mat\n",
        "loading finished!\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "processed_image: (?, ?, ?, 3)\n",
        "conv1_1: (?, ?, ?, 64)\n",
        "relu1_1: (?, ?, ?, 64)\n",
        "conv1_2: (?, ?, ?, 64)\n",
        "relu1_2: (?, ?, ?, 64)\n",
        "pool1: (?, ?, ?, 64)\n",
        "conv2_1: (?, ?, ?, 128)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "relu2_1: (?, ?, ?, 128)\n",
        "conv2_2: (?, ?, ?, 128)\n",
        "relu2_2: (?, ?, ?, 128)\n",
        "pool2: (?, ?, ?, 128)\n",
        "conv3_1: (?, ?, ?, 256)\n",
        "relu3_1: (?, ?, ?, 256)\n",
        "conv3_2: (?, ?, ?, 256)\n",
        "relu3_2: (?, ?, ?, 256)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "conv3_3: (?, ?, ?, 256)\n",
        "relu3_3: (?, ?, ?, 256)\n",
        "conv3_4: (?, ?, ?, 256)\n",
        "relu3_4: (?, ?, ?, 256)\n",
        "pool3: (?, ?, ?, 256)\n",
        "conv4_1: (?, ?, ?, 512)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "relu4_1: (?, ?, ?, 512)\n",
        "conv4_2: (?, ?, ?, 512)\n",
        "relu4_2: (?, ?, ?, 512)\n",
        "conv4_3: (?, ?, ?, 512)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "relu4_3: (?, ?, ?, 512)\n",
        "conv4_4: (?, ?, ?, 512)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "relu4_4: (?, ?, ?, 512)\n",
        "pool4: (?, ?, ?, 512)\n",
        "conv5_1: (?, ?, ?, 512)\n",
        "relu5_1: (?, ?, ?, 512)\n",
        "conv5_2: (?, ?, ?, 512)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "relu5_2: (?, ?, ?, 512)\n",
        "conv5_3: (?, ?, ?, 512)\n",
        "pool5: (?, ?, ?, 512)\n",
        "dropout6: (?, ?, ?, 4096)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "dropout7: (?, ?, ?, 4096)\n",
        "fcn8: (?, ?, ?, 11)\n",
        "deconv1 output_shape: (?, ?, ?, 512)\n",
        "\n",
        "Initializing the variables ...\n"
       ]
      }
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [],
     "language": "python",
     "metadata": {},
     "outputs": []
    }
   ],
   "metadata": {}
  }
 ]
}