{
 "metadata": {
  "name": "",
  "signature": "sha256:f7e27893211c99f4dbcf5be6f75e78d475b56ac26d47a316872b8daf01e48736"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "import tensorflow as tf\n",
      "import numpy as np\n",
      "import os\n",
      "import scipy.io\n",
      "import glob\n",
      "import random\n",
      "import BatchDatsetReader as dataset\n",
      "import scipy.misc as misc\n",
      "\n",
      "# reset the graph\n",
      "tf.reset_default_graph()\n",
      "\n",
      "# reset tf.flags.FLAGS\n",
      "import argparse\n",
      "tf.reset_default_graph()\n",
      "tf.flags.FLAGS = tf.python.platform.flags._FlagValues()\n",
      "tf.flags._global_parser = argparse.ArgumentParser()\n",
      "\n",
      "# set tf.flags.FLAGS\n",
      "FLAGS = tf.flags.FLAGS\n",
      "tf.flags.DEFINE_integer(\"batch_size\",\"2\",\"batch size for training\")\n",
      "tf.flags.DEFINE_string(\"logs_dir\",\"logs/\",\"path to logs directory\")\n",
      "tf.flags.DEFINE_string(\"data_dir\",\"Data_zoo/MIT_SceneParsing/\",\"path to dataset\")\n",
      "tf.flags.DEFINE_string(\"pickle_name\",\"MITSceneParsing.pickle\",\"pickle file of the data\")\n",
      "tf.flags.DEFINE_string(\"data_url\",\"http://sceneparsing.csail.mit.edu/data/ADEChallengeData2016.zip\",\"url of the data\")\n",
      "tf.flags.DEFINE_float(\"learning_rate\",\"1e-4\",\"learning rate for the optimizier\")\n",
      "tf.flags.DEFINE_string(\"model_dir\",\"Model_zoo/\",\"path to vgg model mat\")\n",
      "tf.flags.DEFINE_bool(\"debug\",\"True\",\"Debug model: True/False\")\n",
      "tf.flags.DEFINE_string(\"mode\",\"train\",\"Mode: train/ valid\")\n",
      "tf.flags.DEFINE_float(\"max_iters\",\"1e+5\",\"max training iterations of batches\")\n",
      "tf.flags.DEFINE_integer(\"num_classes\",\"151\",\"mit_sceneparsing with (150+1) classes\")\n",
      "tf.flags.DEFINE_integer(\"image_size\",\"224\",\"can be variable in deed\")\n",
      "tf.flags.DEFINE_string(\"model_weights\",\"http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat\",\"pretrained weights of the CNN in use\")\n",
      "tf.flags.DEFINE_string(\"full_model\",\"full_model/\",\"trained parameters of the whole network\")\n",
      "tf.flags.DEFINE_string(\"full_model_file\",\" \",\"pretrained parameters of the whole network\")\n",
      "tf.flags.DEFINE_bool(\"load\",\"False\",\"load in pretrained parameters\")\n",
      "\n",
       "# check if the CNN weights folder exist\n",
       "if not os.path.exists(FLAGS.model_dir):\n",
       "    os.makedirs(FLAGS.model_dir)\n",
       "\n",
       "# check if the CNN weights file exist\n",
       "# (the local file name is the last path component of the weights URL)\n",
       "weights_file = os.path.join(FLAGS.model_dir,FLAGS.model_weights.split('/')[-1])\n",
       "if not os.path.exists(weights_file):\n",
       "    print(\"\\ndownloading \"+weights_file+\" ...\")\n",
       "    # NOTE(review): shells out to wget -- assumes a POSIX shell with wget on\n",
       "    # PATH and does not check that the download actually succeeded\n",
       "    os.system(\"wget \"+FLAGS.model_weights+\" -P \"+FLAGS.model_dir)\n",
       "    print(\"download finished!\\n\")\n",
       "else:\n",
       "    print(\"\\n\"+weights_file+\" has already been downloaded.\\n\")\n",
       "\n",
       "# load the weights file (matconvnet VGG-19 .mat via scipy)\n",
       "print(\"\\nloading pretrained weights from: \"+weights_file)\n",
       "pretrain_weights = scipy.io.loadmat(weights_file)\n",
       "print(\"loading finished!\\n\")\n",
       "    \n",
       "# the mean RGB\n",
       "# NOTE(review): indexing assumes the matconvnet 'normalization' struct\n",
       "# layout with the mean image as its first field -- confirm for other models\n",
       "mean = pretrain_weights['normalization'][0][0][0] # shape(224,224,3)\n",
       "mean_pixel = np.mean(mean,axis=(0,1)) # average on (height,width) to compute the mean RGB   \n",
       "\n",
       "# the weights and biases\n",
       "# 'layers' holds the per-layer matconvnet structs; squeeze drops wrapper dims\n",
       "weights_biases = np.squeeze(pretrain_weights['layers'])\n",
      "\n",
       "# network input data\n",
       "# dropout_prob is the KEEP probability handed to tf.nn.dropout below\n",
       "# (fed 0.85 while training, 1.0 for validation)\n",
       "dropout_prob = tf.placeholder(tf.float32,name=\"dropout_probability\")\n",
       "# NHWC float RGB batch; batch size and spatial dims are left dynamic\n",
       "images = tf.placeholder(tf.float32,shape=[None,None,None,3],name=\"input_images\")\n",
       "# per-pixel class-index maps with a single channel\n",
       "annotations = tf.placeholder(tf.uint8,shape=[None,None,None,1],name=\"input_annotations\")\n",
       "\n",
       "# subtract the mean image\n",
       "processed_image = images - mean_pixel\n",
       "\n",
       "# construct the semantic_seg network\n",
       "with tf.variable_scope(\"semantic_seg\"):\n",
       "    # convs of the vgg net\n",
       "    # net caches every intermediate layer output; pool3/pool4 are reused\n",
       "    # below as the FCN skip connections\n",
       "    net = {}\n",
       "    layers = [\n",
       "        'conv1_1','relu1_1','conv1_2','relu1_2','pool1',\n",
       "        'conv2_1','relu2_1','conv2_2','relu2_2','pool2',\n",
       "        'conv3_1','relu3_1','conv3_2','relu3_2','conv3_3','relu3_3','conv3_4','relu3_4','pool3',\n",
       "        'conv4_1','relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','conv4_4','relu4_4','pool4',\n",
       "        'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3','relu5_3','conv5_4','relu5_4','pool5'\n",
       "    ]\n",
       "    current = processed_image\n",
       "    for i,name in enumerate(layers):\n",
       "        # first four chars pick the op kind: 'conv'/'relu'/'pool'\n",
       "        # (note: shadows the builtin `type` for the rest of this loop body)\n",
       "        type = name[:4]\n",
       "        if type == 'conv':\n",
       "            # matconvnet weights: (width, height, in_channels, out_channels)\n",
       "            # tensorflow weights: (height, width, in_channels, out_channels)\n",
       "            weights, biases = weights_biases[i][0][0][0][0]\n",
       "            weights = np.transpose(weights,(1,0,2,3)) \n",
       "            biases = np.squeeze(biases)\n",
       "            \n",
       "            # wrap the pretrained values as initializers so the conv layers\n",
       "            # start from VGG-19 weights but remain trainable variables\n",
       "            init = tf.constant_initializer(weights,dtype=tf.float32)\n",
       "            weights = tf.get_variable(initializer=init,shape=weights.shape,name=name+\"_w\")\n",
       "            \n",
       "            init = tf.constant_initializer(biases,dtype=tf.float32)\n",
       "            biases = tf.get_variable(initializer=init,shape=biases.shape,name=name+\"_b\")\n",
       "            \n",
       "            current = tf.nn.conv2d(current,weights,strides=[1,1,1,1],padding=\"SAME\")\n",
       "            current = tf.nn.bias_add(current,biases,name=name)\n",
       "        elif type == 'relu':\n",
       "            current = tf.nn.relu(current,name=name)\n",
       "            if FLAGS.debug:\n",
       "                tf.histogram_summary(current.op.name+\"/activation\",current)\n",
       "                tf.scalar_summary(current.op.name+\"/sparsity\",tf.nn.zero_fraction(current))\n",
       "        elif type == 'pool':\n",
       "            # NOTE(review): pool1-pool4 use avg_pool while only pool5 uses\n",
       "            # max_pool; canonical VGG uses max pooling everywhere -- confirm\n",
       "            # this asymmetry is intentional\n",
       "            if name == 'pool5':\n",
       "                current = tf.nn.max_pool(current,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=name)\n",
       "            else:\n",
       "                current = tf.nn.avg_pool(current,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\",name=name)\n",
       "        net[name] = current\n",
       "                \n",
       "    # fcn6: 7x7 conv standing in for VGG's first fully-connected layer\n",
       "    init = tf.truncated_normal(shape=[7,7,512,4096],stddev=0.02)\n",
       "    fcn6_w = tf.get_variable(initializer=init,name=\"fcn6_w\")\n",
       "\n",
       "    init = tf.constant(0.0,shape=[4096])\n",
       "    fcn6_b = tf.get_variable(initializer=init,name=\"fcn6_b\")\n",
       "\n",
       "    fcn6 = tf.nn.conv2d(current,fcn6_w,strides=[1,1,1,1],padding=\"SAME\")\n",
       "    fcn6 = tf.nn.bias_add(fcn6,fcn6_b,name=\"fcn6\")\n",
       "\n",
       "    relu6 = tf.nn.relu(fcn6,name=\"relu6\")\n",
       "    if FLAGS.debug:\n",
       "        tf.histogram_summary(\"relu6/activation\", relu6, collections=None, name=None)\n",
       "        tf.scalar_summary(\"relu6/sparsity\", tf.nn.zero_fraction(relu6), collections=None, name=None)\n",
       "    dropout6 = tf.nn.dropout(relu6, keep_prob=dropout_prob, noise_shape=None, seed=None, name=\"dropout6\")\n",
       "\n",
       "    # fcn7: 1x1 conv standing in for VGG's second fully-connected layer\n",
       "    init = tf.truncated_normal(shape=[1,1,4096,4096],stddev=0.02)\n",
       "    fcn7_w = tf.get_variable(initializer=init,name=\"fcn7_w\")\n",
       "\n",
       "    init = tf.constant(0.0,shape=[4096])\n",
       "    fcn7_b = tf.get_variable(initializer=init,name=\"fcn7_b\")\n",
       "\n",
       "    fcn7 = tf.nn.conv2d(dropout6, fcn7_w, strides=[1,1,1,1], padding=\"SAME\", use_cudnn_on_gpu=None, data_format=None, name=None)\n",
       "    fcn7 = tf.nn.bias_add(fcn7, fcn7_b, data_format=None, name=\"fcn7\")\n",
       "\n",
       "    relu7 = tf.nn.relu(fcn7,name=\"relu7\")\n",
       "    if FLAGS.debug:\n",
       "        tf.histogram_summary(\"relu7/activation\", relu7, collections=None, name=None)\n",
       "        tf.scalar_summary(\"relu7/sparsity\", tf.nn.zero_fraction(relu7), collections=None, name=None)\n",
       "    dropout7 = tf.nn.dropout(relu7, keep_prob=dropout_prob, noise_shape=None, seed=None, name=\"dropout7\")\n",
       "\n",
       "    # fcn8: 1x1 conv producing one score map per class\n",
       "    init = tf.truncated_normal(shape=[1,1,4096,FLAGS.num_classes],stddev=0.02)\n",
       "    fcn8_w = tf.get_variable(initializer=init,name=\"fcn8_w\")\n",
       "\n",
       "    init = tf.constant(0.0,shape=[FLAGS.num_classes])\n",
       "    fcn8_b = tf.get_variable(initializer=init,name=\"fcn8_b\")\n",
       "\n",
       "    fcn8 = tf.nn.conv2d(dropout7, fcn8_w, strides=[1,1,1,1], padding=\"SAME\", use_cudnn_on_gpu=None, data_format=None, name=None)\n",
       "    fcn8 = tf.nn.bias_add(fcn8, fcn8_b, data_format=None, name=\"fcn8\")\n",
       "\n",
       "    # deconv1 + net['pool4']: x32 -> x16\n",
       "    # conv2d_transpose filters are shaped (k, k, out_channels, in_channels)\n",
       "    s = 2\n",
       "    k = 2*s\n",
       "    in_channel = FLAGS.num_classes\n",
       "    out_channel = net['pool4'].get_shape()[3].value\n",
       "    out_shape = tf.shape(net['pool4'])\n",
       "\n",
       "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
       "    deconv1_w = tf.get_variable(initializer=init,name=\"deconv1_w\")\n",
       "\n",
       "    init = tf.constant(0.0,shape=[out_channel])\n",
       "    deconv1_b = tf.get_variable(initializer=init,name=\"deconv1_b\")\n",
       "\n",
       "    deconv1 = tf.nn.conv2d_transpose(fcn8, deconv1_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
       "    deconv1 = tf.nn.bias_add(deconv1, deconv1_b, data_format=None, name=\"deconv1\")\n",
       "\n",
       "    # skip connection: fuse upsampled scores with the pool4 features\n",
       "    fuse1 = tf.add(deconv1, net['pool4'], name=\"fuse1\")\n",
       "            \n",
       "    # deconv2 + net['pool3']: x16 -> x8\n",
       "    s = 2\n",
       "    k = 2*s\n",
       "    in_channel = out_channel\n",
       "    out_channel = net['pool3'].get_shape()[3].value\n",
       "    out_shape = tf.shape(net['pool3'])\n",
       "\n",
       "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
       "    deconv2_w = tf.get_variable(initializer=init,name=\"deconv2_w\")\n",
       "\n",
       "    init = tf.constant(0.0,shape=[out_channel])\n",
       "    deconv2_b = tf.get_variable(initializer=init,name=\"deconv2_b\")\n",
       "\n",
       "    deconv2 = tf.nn.conv2d_transpose(fuse1, deconv2_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
       "    deconv2 = tf.nn.bias_add(deconv2, deconv2_b, data_format=None, name=\"deconv2\")\n",
       "\n",
       "    fuse2 = tf.add(deconv2,net['pool3'],name=\"fuse2\")\n",
       "\n",
       "    # deconv3: x8 -> image_size (final upsample back to input resolution)\n",
       "    s = 8\n",
       "    k = 2*s\n",
       "    in_channel = out_channel\n",
       "    out_channel = FLAGS.num_classes\n",
       "    # tf.pack is the pre-1.0 name of tf.stack\n",
       "    out_shape = tf.pack([tf.shape(processed_image)[0],tf.shape(processed_image)[1],tf.shape(processed_image)[2],out_channel])\n",
       "            \n",
       "    init = tf.truncated_normal(shape=[k,k,out_channel,in_channel],stddev=0.02)\n",
       "    deconv3_w = tf.get_variable(initializer=init,name=\"deconv3_w\")\n",
       "\n",
       "    init = tf.constant(0.0,shape=[out_channel])\n",
       "    deconv3_b = tf.get_variable(initializer=init,name=\"deconv3_b\")\n",
       "\n",
       "    deconv3 = tf.nn.conv2d_transpose(fuse2, deconv3_w, output_shape=out_shape, strides=[1,s,s,1], padding='SAME', name=None)\n",
       "    deconv3 = tf.nn.bias_add(deconv3, deconv3_b, data_format=None, name=\"deconv3\")\n",
       "\n",
       "    # per-pixel prediction: argmax over the class dimension, then restore the\n",
       "    # singleton channel so predictions match the annotations placeholder shape\n",
       "    annotations_pred = tf.argmax(deconv3, dimension=3, name=None)\n",
       "    annotations_pred = tf.expand_dims(annotations_pred, dim=3, name=\"prediction\")\n",
      "\n",
      "# log images, annotations, annotations_pred\n",
      "tf.image_summary(\"images\", images, max_images=2, collections=None, name=None)\n",
      "tf.image_summary(\"annotations\", tf.cast(annotations,tf.uint8), max_images=2, collections=None, name=None)\n",
      "tf.image_summary(\"annotations_pred\", tf.cast(annotations_pred,tf.uint8), max_images=2, collections=None, name=None)\n",
      "\n",
      "# construct the loss\n",
      "loss = tf.nn.softmax_cross_entropy_with_logits(deconv3, annotations, dim=3, name=None)\n",
      "loss = tf.reduce_mean(loss, reduction_indices=None, keep_dims=False, name=\"pixel-wise_cross-entropy_loss\")\n",
      "\n",
      "# log the loss\n",
      "tf.scalar_summary(\"pixel-wise_cross-entropy_loss\", loss, collections=None, name=None)\n",
      "\n",
      "# log all the trainable variables\n",
      "trainabel_vars = tf.trainable_variables()\n",
      "if FLAGS.debug:\n",
      "    for var in trainabel_vars:\n",
      "        tf.histogram_summary(var.op.name+\"/values\", var, collections=None, name=None)\n",
      "        tf.add_to_collection(\"sum(t ** 2) / 2 of all trainable_vars\", tf.nn.l2_loss(var))\n",
      "        \n",
      "# construct the optimizier\n",
      "optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n",
      "gradients = optimizer.compute_gradients(loss,trainabel_vars)\n",
      "if FLAGS.debug:\n",
      "    # log the gradients\n",
      "    for grad, var in gradients:\n",
      "        tf.histogram_summary(var.op.name+\"/gradients\", grad, collections=None, name=None)\n",
      "train_op = optimizer.apply_gradients(gradients)\n",
      "\n",
       "# initialize the variables\n",
       "# (runs every variable's initializer -- the VGG conv layers thus start from\n",
       "# the pretrained constants wrapped as initializers above)\n",
       "print(\"\\nInitializing the variables ...\\n\")\n",
       "sess = tf.InteractiveSession()\n",
       "tf.initialize_all_variables().run()\n",
       "\n",
       "# set up the saver\n",
       "print(\"\\nSetting up the Saver ...\\n\")\n",
       "saver = tf.train.Saver()\n",
       "if FLAGS.load:\n",
       "    # restoring after the init above lets checkpoint values override it\n",
       "    print(\"\\nLoading pretrain parameters of the whole network ...\\n\")\n",
       "    saver.restore(sess, FLAGS.full_model_file)\n",
       "\n",
       "# set the summary writer\n",
       "print(\"\\nSetting the summary writers ...\\n\")\n",
       "summary_op = tf.merge_all_summaries()\n",
       "if not os.path.exists(FLAGS.logs_dir):\n",
       "    os.system(\"mkdir \"+FLAGS.logs_dir)\n",
       "# NOTE(review): the rm -r calls below wipe any previous run's logs for the\n",
       "# selected mode -- confirm losing old TensorBoard data is acceptable\n",
       "if FLAGS.mode == 'train':\n",
       "    if os.path.exists(FLAGS.logs_dir+\"/train\"):\n",
       "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/train\")\n",
       "    if os.path.exists(FLAGS.logs_dir+\"/valid\"):\n",
       "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/valid\")\n",
       "    train_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/train\",sess.graph)\n",
       "    valid_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/valid\")\n",
       "elif FLAGS.mode == 'valid':\n",
       "    if os.path.exists(FLAGS.logs_dir+\"/complete_valid\"):\n",
       "        os.system(\"rm -r \"+FLAGS.logs_dir+\"/complete_valid\")\n",
       "    valid_writer = tf.train.SummaryWriter(FLAGS.logs_dir+\"/complete_valid\")    \n",
      "\n",
      "# read data_records from *.pickle\n",
      "print(\"\\nReading in and reprocessing all images ...\\n\")\n",
      "# check if FLAGS.data_dir folder exist\n",
      "if not os.path.exists(FLAGS.data_dir):\n",
      "    os.makedirs(FLAGS.data_dir)\n",
      "# check if the *.pickle file exist\n",
      "pickle_file = os.path.join(FLAGS.data_dir,FLAGS.pickle_name)\n",
      "if not os.path.exists(pickle_file):\n",
      "    # check if the *.zip exist\n",
      "    zip_file = os.path.join(FLAGS.data_dir,FLAGS.data_url.split('/')[-1])\n",
      "    if not os.path.exists(zip_file):\n",
      "        # download the *.zip\n",
      "        print(\"downloading \"+zip_file+\" ..\")\n",
      "        os.system(\"wget \"+FLAGS.data_url+\" -P \"+FLAGS.data_dir)\n",
      "        print(\"download finished!\")\n",
      "        # unzip the file\n",
      "        print(\"unzipping \"+zip_file+\" ..\")\n",
      "        os.system(\"unzip \"+zip_file+\" -d \"+FLAGS.data_dir)\n",
      "        print(\"unzipping finished!\")\n",
      "    # pack data into *.pickle\n",
      "    source_datadir =  zip_file.split('.')[0]\n",
      "    if not os.path.exists(source_datadir):\n",
      "        print(\"Error: source_datadir not found!!!\")\n",
      "        exit()\n",
      "    else:\n",
      "        data_types = ['train','valid']\n",
      "        data_list = {}\n",
      "        for data_type in data_types:\n",
      "            image_list = []\n",
      "            data_list[data_type] = []\n",
      "            # find all images\n",
      "            image_names = os.path.join(source_datadir,\"images\",data_type,'*.jpg')\n",
      "            image_list.extend(glob.glob(image_names))\n",
      "            if not image_list:\n",
      "                print(\"Error: no images found for \"+data_type+\"!!!\")\n",
      "                exit()\n",
      "            else:\n",
      "                # find corresponding annotations\n",
      "                for i in image_list:\n",
      "                    image_name = (i.split('/')[-1]).split('.')[0]\n",
      "                    annotation_name = os.path.join(source_datadir,\"annotations\",data_type,image_name+\".png\")\n",
      "                    if os.path.exists(annotation_name):\n",
      "                        # record this data tuple\n",
      "                        record = {'image':i,'annotation':annotation_name,'filename':image_name}\n",
      "                        data_list[data_type].append(record)\n",
      "            # shuffle all tuples\n",
      "            random.shuffle(data_list[data_type])\n",
      "            print(\"Number of %s tuples: %d\"%(data_type,len(data_list[data_type])))\n",
      "    print(\"Packing data into \"+pickle_file+\" ...\")\n",
      "    with open(pickle_file,'wb') as f:\n",
      "        pickle.dump(data_list,f,pickle.HIGHEST_PROTOCOL)\n",
      "    print(\"pickle finished!!!\")\n",
      "# load data_records from *.pickle\n",
      "with open(pickle_file,'rb') as f:\n",
      "    pickle_records = pickle.load(f)\n",
      "    train_records = pickle_records['train']\n",
      "    valid_records = pickle_records['valid']\n",
      "    del pickle_records\n",
      "    \n",
      "# initialize the data reader\n",
      "print(\"Initializing the data reader...\")\n",
      "reader_optiions = {'resize':True,'resize_size':FLAGS.image_size}\n",
      "if FLAGS.mode == 'train':\n",
      "    train_reader = dataset.BatchDatset(train_records,reader_options)\n",
      "valid_reader = dataset.BatchDatset(valid_records,reader_options)\n",
      "\n",
      "# check if FLAGS.full_model exist\n",
      "if not os.path.exists(FLAGS.full_model):\n",
      "    os.makedirs(FLAGS.full_model)\n",
      "\n",
      "# start training/ testing\n",
      "if FLAGS.mode == 'train':\n",
      "    for itr in xrange(FLAGS.max_iters):\n",
      "        # read next batch\n",
      "        train_images, train_annotations = train_reader.next_batch(FLAGS.batch_size)\n",
      "        feed_dict = {images:train_images,annotations:train_annotations,dropout_prob:0.85}\n",
      "        # training\n",
      "        sess.run(train_op,feed_dict=feed_dict)\n",
      "        # log training info\n",
      "        if itr % 10 == 0:\n",
      "            train_loss, train_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "            train_writer.add_summary(train_summary,itr)\n",
      "            print(\"Step: %d, train_loss: %f\"%(itr,train_loss))\n",
      "        # log valid info\n",
      "        if itr % 100 == 0:\n",
      "            valid_images, valid_annotations = valid_reader.get_random_batch(FLAGS.batch_size)\n",
      "            feed_dict = {images:valid_images,annotations:valid_annotations,dropout_prob:1.0}\n",
      "            valid_loss, valid_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "            valid_writer.add_summary(valid_summary,itr)\n",
      "            print(\"==============================\")\n",
      "            print(\"Step: %d, valid_loss: %f\"%(itr,valid_loss))\n",
      "            print(\"==============================\")\n",
      "        # save snapshot\n",
      "        if itr % 500 == 0:\n",
      "            snapshot_name = os.path.join(FLAGS.full_model,str(itr)+\"_model.ckpt\")\n",
      "            saver.save(sess,snapshot_name)\n",
      "elif FLAGS.mode == 'valid':\n",
      "    # quantitative results\n",
      "    valid_images,valid_annotations=valid_reader.get_records()\n",
      "    feed_dict = {images:valid_images,annotations:valid_annotations,dropout_prob:1.0}\n",
      "    valid_loss,valid_summary = sess.run([loss,summary_op],feed_dict=feed_dict)\n",
      "    valid_writer.add_summary(valid_summary,FLAGS.max_iters)\n",
      "    print(\"==============================\")\n",
      "    print(\"Step: %d, valid_loss: %f\"%(FLAGS.max_iters,valid_loss))\n",
      "    print(\"==============================\")\n",
      "    # qualitative results\n",
      "    valid_images,valid_annotations=valid_reader.get_random_batch(FLAGS.batch_size)\n",
      "    feed_dict = {images:valid_images,annotations:valid_annotations,dropout_prob:1.0}\n",
      "    annotations_pred_results = sess.run(annotations_pred,feed_dict=feed_dict)\n",
      "    \n",
      "    valid_annotations = np.squeeze(valid_annotations,axis=3)\n",
      "    annotations_pred_results = np.squeeze(annotations_pred_results,axis=3)\n",
      "    \n",
      "    for n in xrange(FLAGS.batch_size):\n",
      "        print(\"Saving %d valid tuples for qualitative comparisons...\")\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_image.png\",valid_images[n].astype(np.uint8))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_annotation.png\",valid_annotations[n].astype(np.uint8))\n",
      "        misc.imsave(FLAGS.logs_dir+\"/complete_valid/\"+str(n)+\"_prediction.png\",annotations_pred_results[n].astype(np.uint8))\n",
      "        print(\"saving finished!!!\")"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "Model_zoo/imagenet-vgg-verydeep-19.mat has already been downloaded.\n",
        "\n",
        "\n",
        "loading pretrained weights from: Model_zoo/imagenet-vgg-verydeep-19.mat\n",
        "loading finished!\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "\n",
        "Initializing the variables ...\n"
       ]
      }
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 0
    }
   ],
   "metadata": {}
  }
 ]
}