{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python3\n",
    "# -*- coding: utf-8 -*-\n",
    "import argparse  # command-line flag parsing for this training script\n",
    "import os\n",
    "import time\n",
    "\n",
    "import cv2  # OpenCV; used to write evaluation images to disk\n",
    "import numpy as np  # numeric helpers (squeeze/argmax on fetched arrays)\n",
    "import tensorflow as tf\n",
    "\n",
    "import pydensecrf.densecrf as dcrf  # DenseCRF post-processing to refine the coarse FCN output\n",
    "import vgg\n",
    "from dataset import inputs\n",
    "from pydensecrf.utils import (create_pairwise_bilateral,\n",
    "                              create_pairwise_gaussian, unary_from_softmax)\n",
    "from utils import (bilinear_upsample_weights, grayscale_to_voc_impl)  # project-local helper functions\n",
    "\n",
    "import logging  # runtime logging\n",
    "\n",
    "logging.basicConfig(format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s', level=logging.DEBUG)  # global logging configuration\n",
    "\n",
    "\n",
    "def parse_args(check=True):  # NOTE(review): `check` is currently unused\n",
    "    \"\"\"Parse known command-line flags for training.\n",
    "\n",
    "    Returns:\n",
    "        (FLAGS, unparsed): parsed argparse namespace and any\n",
    "        unrecognized argv tokens (parse_known_args does not error on them).\n",
    "    \"\"\"\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument('--checkpoint_path', type=str)  # pretrained VGG-16 checkpoint\n",
    "    parser.add_argument('--output_dir', type=str)  # root for train logs and eval images\n",
    "    parser.add_argument('--dataset_train', type=str)\n",
    "    parser.add_argument('--dataset_val', type=str)\n",
    "    parser.add_argument('--batch_size', type=int, default=16)\n",
    "    parser.add_argument('--max_steps', type=int, default=1500)\n",
    "    parser.add_argument('--learning_rate', type=float, default=1e-4)\n",
    "\n",
    "    FLAGS, unparsed = parser.parse_known_args()\n",
    "    return FLAGS, unparsed\n",
    "\n",
    "\n",
    "FLAGS, unparsed = parse_args()\n",
    "\n",
    "slim = tf.contrib.slim\n",
    "\n",
    "\n",
    "tf.reset_default_graph()  # start from a clean default graph\n",
    "is_training_placeholder = tf.placeholder(tf.bool)  # selects train/val pipeline and dropout mode\n",
    "batch_size = FLAGS.batch_size\n",
    "\n",
    "image_tensor_train, orig_img_tensor_train, annotation_tensor_train = inputs(FLAGS.dataset_train, train=True, batch_size=batch_size, num_epochs=1e4)\n",
    "image_tensor_val, orig_img_tensor_val, annotation_tensor_val = inputs(FLAGS.dataset_val, train=False, num_epochs=1e4)\n",
    "\n",
    "# Choose the train or validation input pipeline at run time via the placeholder.\n",
    "image_tensor, orig_img_tensor, annotation_tensor = tf.cond(is_training_placeholder,\n",
    "                                                           true_fn=lambda: (image_tensor_train, orig_img_tensor_train, annotation_tensor_train),\n",
    "                                                           false_fn=lambda: (image_tensor_val, orig_img_tensor_val, annotation_tensor_val))\n",
    "\n",
    "feed_dict_to_use = {is_training_placeholder: True}\n",
    "\n",
    "upsample_factor = 8  # final upsampling factor (FCN-8s)\n",
    "number_of_classes = 21  # PASCAL VOC: 20 object classes + background\n",
    "\n",
    "log_folder = os.path.join(FLAGS.output_dir, 'train')  # checkpoints + tensorboard logs live here\n",
    "\n",
    "vgg_checkpoint_path = FLAGS.checkpoint_path\n",
    "\n",
    "# Creates a variable to hold the global_step.\n",
    "global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int64)\n",
    "\n",
    "\n",
    "# Define the model -- the final layer is replaced to predict `number_of_classes` classes.\n",
    "with slim.arg_scope(vgg.vgg_arg_scope()):  # build a VGG-16 network inside the current graph\n",
    "    logits, end_points = vgg.vgg_16(image_tensor,\n",
    "                                    num_classes=number_of_classes,  # number of output classes\n",
    "                                    is_training=is_training_placeholder,  # controls dropout\n",
    "                                    spatial_squeeze=False,  # keep spatial dims -- needed for dense (per-pixel) prediction\n",
    "                                    fc_conv_padding='SAME')  # preserve spatial resolution through the fc-as-conv layers\n",
    "\n",
    "downsampled_logits_shape = tf.shape(logits)\n",
    "\n",
    "img_shape = tf.shape(image_tensor)\n",
    "\n",
    "# Calculate the output size of the upsampled tensor\n",
    "# The shape should be batch_size X width X height X num_classes\n",
    "upsampled_logits_shape = tf.stack([\n",
    "                                  downsampled_logits_shape[0],\n",
    "                                  img_shape[1],\n",
    "                                  img_shape[2],\n",
    "                                  downsampled_logits_shape[3]\n",
    "                                  ])\n",
    "\n",
    "\n",
    "##############################################8 upsample factor codes#####################################################\n",
    "\n",
    "\n",
    "pool3_feature = end_points['vgg_16/pool3']  # feature map after pool3 (1/8 of input resolution)\n",
    "with tf.variable_scope('vgg_16/fc8'):\n",
    "    # New 1x1 convolution that classifies the pool3 features into\n",
    "    # `number_of_classes` channels -- the FCN-8s skip connection.\n",
    "    aux_logits_8s = slim.conv2d(pool3_feature,\n",
    "                                number_of_classes,\n",
    "                                [1, 1],\n",
    "                                 activation_fn=None,  # raw logits, no activation\n",
    "                                 weights_initializer=tf.zeros_initializer,  # zero-init so the skip branch starts as a no-op\n",
    "                                 scope='conv_pool3')\n",
    "\n",
    "# Perform the upsampling\n",
    "# Bilinear-interpolation weights converted into a transposed-convolution\n",
    "# kernel, used to initialize the x4 upsampling of the fc8 logits.\n",
    "upsample_filter_np_x4 = bilinear_upsample_weights(4,  # upsample_factor\n",
    "                                                  number_of_classes)\n",
    "\n",
    "upsample_filter_tensor_x4 = tf.Variable(upsample_filter_np_x4,  # trainable: bilinear init, refined during training\n",
    "                                        name='vgg_16/fc8/t_conv_x4')\n",
    "\n",
    "# 4x upsampling (transposed convolution) of the final logits so they match\n",
    "# the spatial size of the pool3 skip branch.\n",
    "upsampled_logits = tf.nn.conv2d_transpose(logits,\n",
    "                                          upsample_filter_tensor_x4,\n",
    "                                          output_shape=tf.shape(aux_logits_8s),  # match the pool3 feature size exactly\n",
    "                                          strides=[1, 4, 4, 1],\n",
    "                                          padding='SAME')\n",
    "\n",
    "\n",
    "upsampled_logits = upsampled_logits + aux_logits_8s  # fuse the pool3 skip logits with the upsampled final logits\n",
    "\n",
    "upsample_filter_np_x8 = bilinear_upsample_weights(upsample_factor,  # bilinear kernel for the final 8x upsampling\n",
    "                                                   number_of_classes)\n",
    "\n",
    "upsample_filter_tensor_x8 = tf.Variable(upsample_filter_np_x8,\n",
    "                                        name='vgg_16/fc8/t_conv_x8')\n",
    "# Final 8x transposed convolution back to the full input resolution.\n",
    "upsampled_logits = tf.nn.conv2d_transpose(upsampled_logits,  # fused logits as input\n",
    "                                          upsample_filter_tensor_x8,  # bilinear-initialized upsampling kernel\n",
    "                                          output_shape=upsampled_logits_shape,  # exact desired output size; TF computes\n",
    "                                                                                # and applies the crop internally (unlike\n",
    "                                                                                # Caffe, where the crop offset is given\n",
    "                                                                                # explicitly)\n",
    "                                          strides=[1, upsample_factor,upsample_factor, 1],  # stride equals the upsampling factor\n",
    "                                          padding='SAME')\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "##############################################8 upsample factor codes#####################################################\n",
    "\n",
    "\n",
    "# One-hot encode the per-pixel ground-truth labels.\n",
    "lbl_onehot = tf.one_hot(annotation_tensor, number_of_classes)\n",
    "# Per-pixel softmax cross-entropy between the upsampled logits and the labels.\n",
    "cross_entropies = tf.nn.softmax_cross_entropy_with_logits(logits=upsampled_logits,\n",
    "                                                          labels=lbl_onehot)\n",
    "\n",
    "cross_entropy_loss = tf.reduce_mean(tf.reduce_sum(cross_entropies, axis=-1))\n",
    "\n",
    "\n",
    "# Tensor to get the final prediction for each pixel -- pay\n",
    "# attention that we don't need softmax in this case because\n",
    "# we only need the final decision. If we also need the respective\n",
    "# probabilities we will have to apply softmax.\n",
    "pred = tf.argmax(upsampled_logits, axis=3)\n",
    "\n",
    "probabilities = tf.nn.softmax(upsampled_logits)  # needed later as the CRF unary input\n",
    "\n",
    "# Here we define an optimizer and put all the variables\n",
    "# that will be created under a namespace of 'adam_vars'.\n",
    "# This is done so that we can easily access them later.\n",
    "# Those variables are used by adam optimizer and are not\n",
    "# related to variables of the vgg model.\n",
    "\n",
    "# We also retrieve gradient Tensors for each of our variables\n",
    "# This way we can later visualize them in tensorboard.\n",
    "# optimizer.compute_gradients and optimizer.apply_gradients\n",
    "# is equivalent to running:\n",
    "# train_step = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cross_entropy_loss)\n",
    "with tf.variable_scope(\"adam_vars\"):\n",
    "    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)\n",
    "    gradients = optimizer.compute_gradients(loss=cross_entropy_loss)\n",
    "\n",
    "    for grad_var_pair in gradients:\n",
    "\n",
    "        current_variable = grad_var_pair[1]\n",
    "        current_gradient = grad_var_pair[0]\n",
    "\n",
    "        # Replace some characters from the original variable name:\n",
    "        # tensorboard doesn't accept the ':' symbol\n",
    "        gradient_name_to_save = current_variable.name.replace(\":\", \"_\")\n",
    "\n",
    "        # Let's get histogram of gradients for each layer and\n",
    "        # visualize them later in tensorboard\n",
    "        tf.summary.histogram(gradient_name_to_save, current_gradient)\n",
    "\n",
    "    train_step = optimizer.apply_gradients(grads_and_vars=gradients, global_step=global_step)\n",
    "\n",
    "# Now we define a function that will load the weights from VGG checkpoint\n",
    "# into our variables when we call it. We exclude the weights from the last layer\n",
    "# which is responsible for class predictions. We do this because\n",
    "# we will have different number of classes to predict and we can't\n",
    "# use the old ones as an initialization.\n",
    "vgg_except_fc8_weights = slim.get_variables_to_restore(exclude=['vgg_16/fc8', 'adam_vars'])\n",
    "\n",
    "# Here we get variables that belong to the last layer of network.\n",
    "# The number of classes that VGG was originally trained on is different\n",
    "# from ours -- here we predict `number_of_classes` (21) classes.\n",
    "vgg_fc8_weights = slim.get_variables_to_restore(include=['vgg_16/fc8'])\n",
    "\n",
    "adam_optimizer_variables = slim.get_variables_to_restore(include=['adam_vars'])\n",
    "\n",
    "# Add summary op for the loss -- to be able to see it in\n",
    "# tensorboard.\n",
    "tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)\n",
    "\n",
    "# Put all summary ops into one op. Produces string when\n",
    "# you run it.\n",
    "merged_summary_op = tf.summary.merge_all()\n",
    "\n",
    "# Create the summary writer -- to write all the logs\n",
    "# into a specified file. This file can be later read\n",
    "# by tensorboard.\n",
    "summary_string_writer = tf.summary.FileWriter(log_folder)\n",
    "\n",
    "# Create the log folder if doesn't exist yet\n",
    "if not os.path.exists(log_folder):\n",
    "    os.makedirs(log_folder)\n",
    "\n",
    "# If a checkpoint already exists in the log folder, resume from it and\n",
    "# ignore --checkpoint_path; otherwise prepare the VGG-weight loaders.\n",
    "checkpoint_path = tf.train.latest_checkpoint(log_folder)\n",
    "continue_train = False\n",
    "if checkpoint_path:\n",
    "    tf.logging.info(\n",
    "        'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n",
    "        % log_folder)\n",
    "    variables_to_restore = slim.get_model_variables()\n",
    "\n",
    "    continue_train = True\n",
    "\n",
    "else:\n",
    "\n",
    "    # Create an OP that performs the initialization of\n",
    "    # values of variables to the values from VGG.\n",
    "    read_vgg_weights_except_fc8_func = slim.assign_from_checkpoint_fn(\n",
    "        vgg_checkpoint_path,\n",
    "        vgg_except_fc8_weights)\n",
    "\n",
    "    # Initializer for the new fc8 weights (new number of classes).\n",
    "    vgg_fc8_weights_initializer = tf.variables_initializer(vgg_fc8_weights)\n",
    "\n",
    "    # Initializer for adam variables\n",
    "    optimization_variables_initializer = tf.variables_initializer(adam_optimizer_variables)\n",
    "\n",
    "\n",
    "# Session config: let GPU memory grow on demand instead of pre-allocating it all.\n",
    "sess_config = tf.ConfigProto()\n",
    "sess_config.gpu_options.allow_growth = True\n",
    "sess = tf.Session(config=sess_config)\n",
    "\n",
    "init_op = tf.global_variables_initializer()\n",
    "init_local_op = tf.local_variables_initializer()\n",
    "\n",
    "saver = tf.train.Saver(max_to_keep=5)\n",
    "\n",
    "\n",
    "def perform_crf(image, probabilities):\n",
    "    \"\"\"Refine per-pixel class probabilities with a fully-connected CRF.\n",
    "\n",
    "    Args:\n",
    "        image: original RGB image; squeezed to (H, W, 3) internally.\n",
    "        probabilities: softmax output; squeezed to (H, W, C) internally.\n",
    "\n",
    "    Returns:\n",
    "        An (H, W) integer array of refined per-pixel class indices.\n",
    "    \"\"\"\n",
    "\n",
    "    image = image.squeeze()\n",
    "    softmax = probabilities.squeeze().transpose((2, 0, 1))  # -> (C, H, W), the layout pydensecrf expects\n",
    "\n",
    "    # The input should be the negative of the logarithm of probability values\n",
    "    # Look up the definition of the softmax_to_unary for more information\n",
    "    unary = unary_from_softmax(softmax)\n",
    "\n",
    "    # The inputs should be C-continious -- we are using Cython wrapper\n",
    "    unary = np.ascontiguousarray(unary)\n",
    "\n",
    "    d = dcrf.DenseCRF(image.shape[0] * image.shape[1], number_of_classes)\n",
    "\n",
    "    d.setUnaryEnergy(unary)\n",
    "\n",
    "    # This potential penalizes small pieces of segmentation that are\n",
    "    # spatially isolated -- enforces more spatially consistent segmentations\n",
    "    feats = create_pairwise_gaussian(sdims=(10, 10), shape=image.shape[:2])\n",
    "\n",
    "    d.addPairwiseEnergy(feats, compat=3,\n",
    "                        kernel=dcrf.DIAG_KERNEL,\n",
    "                        normalization=dcrf.NORMALIZE_SYMMETRIC)\n",
    "\n",
    "    # This creates the color-dependent features --\n",
    "    # because the segmentation that we get from CNN are too coarse\n",
    "    # and we can use local color features to refine them\n",
    "    feats = create_pairwise_bilateral(sdims=(50, 50), schan=(20, 20, 20),\n",
    "                                      img=image, chdim=2)\n",
    "\n",
    "    d.addPairwiseEnergy(feats, compat=10,\n",
    "                        kernel=dcrf.DIAG_KERNEL,\n",
    "                        normalization=dcrf.NORMALIZE_SYMMETRIC)\n",
    "    Q = d.inference(5)  # 5 mean-field iterations\n",
    "\n",
    "    res = np.argmax(Q, axis=0).reshape((image.shape[0], image.shape[1]))\n",
    "    return res\n",
    "\n",
    "\n",
    "with sess:\n",
    "    # Run the initializers.\n",
    "    sess.run(init_op)\n",
    "    sess.run(init_local_op)\n",
    "    if continue_train:\n",
    "        # Resume: restore all saved variables (incl. global_step) from the latest checkpoint.\n",
    "        saver.restore(sess, checkpoint_path)\n",
    "\n",
    "        logging.debug('checkpoint restored from [{0}]'.format(checkpoint_path))\n",
    "    else:\n",
    "        # Fresh start: initialize the new fc8/upsampling weights and the Adam\n",
    "        # slots, then load the pretrained VGG weights for everything else.\n",
    "        sess.run(vgg_fc8_weights_initializer)\n",
    "        sess.run(optimization_variables_initializer)\n",
    "\n",
    "        read_vgg_weights_except_fc8_func(sess)\n",
    "        logging.debug('value initialized...')\n",
    "\n",
    "    # start data reader\n",
    "    coord = tf.train.Coordinator()\n",
    "    threads = tf.train.start_queue_runners(coord=coord)\n",
    "\n",
    "    # Read the current global step up front so `gs` is defined even when\n",
    "    # FLAGS.max_steps == 0 (the final save below uses it); after a restore\n",
    "    # this is the step the previous run stopped at.\n",
    "    gs = sess.run(global_step)\n",
    "\n",
    "    start = time.time()\n",
    "    for i in range(FLAGS.max_steps):\n",
    "        feed_dict_to_use[is_training_placeholder] = True\n",
    "\n",
    "        gs, _ = sess.run([global_step, train_step], feed_dict=feed_dict_to_use)\n",
    "        if gs % 10 == 0:\n",
    "            gs, loss, summary_string = sess.run([global_step, cross_entropy_loss, merged_summary_op], feed_dict=feed_dict_to_use)\n",
    "            logging.debug(\"step {0} Current Loss: {1} \".format(gs, loss))\n",
    "            end = time.time()\n",
    "            logging.debug(\"[{0:.2f}] imgs/s\".format(10 * batch_size / (end - start)))\n",
    "            start = end\n",
    "\n",
    "            # BUGFIX: index summaries by the global step, not the loop counter.\n",
    "            # Using `i` restarted the TensorBoard x-axis at 0 after every\n",
    "            # checkpoint resume, overwriting earlier points.\n",
    "            summary_string_writer.add_summary(summary_string, gs)\n",
    "\n",
    "            if gs % 100 == 0:\n",
    "                save_path = saver.save(sess, os.path.join(log_folder, \"model.ckpt\"), global_step=gs)\n",
    "                logging.debug(\"Model saved in file: %s\" % save_path)\n",
    "\n",
    "            if gs % 200 == 0:\n",
    "                eval_folder = os.path.join(FLAGS.output_dir, 'eval')\n",
    "                if not os.path.exists(eval_folder):\n",
    "                    os.makedirs(eval_folder)\n",
    "\n",
    "                logging.debug(\"validation generated at step [{0}]\".format(gs))\n",
    "                # Switch the input pipeline (and dropout) to validation mode.\n",
    "                feed_dict_to_use[is_training_placeholder] = False\n",
    "                val_pred, val_orig_image, val_annot, val_poss = sess.run([pred, orig_img_tensor, annotation_tensor, probabilities],\n",
    "                                                                         feed_dict=feed_dict_to_use)\n",
    "\n",
    "                # Dump input / ground-truth / raw prediction as color images (RGB -> BGR for cv2).\n",
    "                cv2.imwrite(os.path.join(eval_folder, 'val_{0}_img.jpg'.format(gs)), cv2.cvtColor(np.squeeze(val_orig_image), cv2.COLOR_RGB2BGR))\n",
    "                cv2.imwrite(os.path.join(eval_folder, 'val_{0}_annotation.jpg'.format(gs)),  cv2.cvtColor(grayscale_to_voc_impl(np.squeeze(val_annot)), cv2.COLOR_RGB2BGR))\n",
    "                cv2.imwrite(os.path.join(eval_folder, 'val_{0}_prediction.jpg'.format(gs)),  cv2.cvtColor(grayscale_to_voc_impl(np.squeeze(val_pred)), cv2.COLOR_RGB2BGR))\n",
    "\n",
    "                # CRF-refined prediction and an overlay on the input image.\n",
    "                crf_ed = perform_crf(val_orig_image, val_poss)\n",
    "                cv2.imwrite(os.path.join(eval_folder, 'val_{0}_prediction_crfed.jpg'.format(gs)), cv2.cvtColor(grayscale_to_voc_impl(np.squeeze(crf_ed)), cv2.COLOR_RGB2BGR))\n",
    "\n",
    "                overlay = cv2.addWeighted(cv2.cvtColor(np.squeeze(val_orig_image), cv2.COLOR_RGB2BGR), 1, cv2.cvtColor(grayscale_to_voc_impl(np.squeeze(crf_ed)), cv2.COLOR_RGB2BGR), 0.8, 0)\n",
    "                cv2.imwrite(os.path.join(eval_folder, 'val_{0}_overlay.jpg'.format(gs)), overlay)\n",
    "\n",
    "    # Shut down the queue-runner threads cleanly before the final save.\n",
    "    coord.request_stop()\n",
    "    coord.join(threads)\n",
    "\n",
    "    save_path = saver.save(sess, os.path.join(log_folder, \"model.ckpt\"), global_step=gs)\n",
    "    logging.debug(\"Model saved in file: %s\" % save_path)\n",
    "\n",
    "summary_string_writer.close()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
