# -*- coding: UTF-8 -*-

import numpy as np
import tensorflow as tf
import os,cv2,math
from data_helper import process_data, get_data


# Refer to https://github.com/HasnainRaz/FC-DenseNet-TensorFlow
    



class DenseNet(object):
    """FC-DenseNet ("Tiramisu") for 3-class trimap segmentation.

    Architecture adapted from
    https://github.com/HasnainRaz/FC-DenseNet-TensorFlow.
    Ground-truth trimaps are grey images with values {0, 128, 255}, mapped
    to class ids {0, 1, 2} via integer division by 127.
    """

    def __init__(self, mode='Training', image_width=512, image_height=512):
        '''
        Initializes the DenseNet with the fixed hyper-parameters used in this project.

        Args:
            mode: 'Training' or 'Testing'; controls batch-norm/dropout behaviour and
                  whether the training file lists are loaded from disk.
            image_width: Integer, width expected by the input placeholder.
            image_height: Integer, height expected by the input placeholder.
                  (tf.placeholder needs a fixed spatial size; a different size can be
                  passed when building a graph for inference.)
        '''
        self.mode = mode
        # ----------------- hyper-parameters -----------------
        self.growth_k = 16                  # growth rate: feature maps added per dense layer
        self.layers_per_block = [2, 3, 3]   # number of conv layers in each dense block
        self.nb_blocks = len(self.layers_per_block)
        self.num_classes = 3                # trimap classes: background / unknown / foreground
        self.uniform_image_w = image_width
        self.uniform_image_h = image_height
        self.predicted_trimaps = None
        self.images = None
        self.labels = None
        self.trimaps = None
        if mode == 'Training':
            # get_data only returns the *paths* of images and labels; pixel data
            # is loaded per batch by process_data() in train().
            self.images, self.labels = get_data('data')
        self.input_images = tf.placeholder(
            tf.float32, [None, self.uniform_image_h, self.uniform_image_w, 3],
            name='input_images_placeholder')
        self.gt_trimaps = tf.placeholder(
            tf.int32, [None, self.uniform_image_h, self.uniform_image_w, 1])

    def xentropy_loss(self, logits, labels):
        '''
        Cross-entropy loss over each pixel of the prediction vs. the ground truth.

        Args:
            logits: Tensor, raw unscaled predictions, shape
                    [batch, height, width, self.num_classes].
            labels: Tensor, ground-truth trimap with grey values {0, 128, 255},
                    shape [batch, height, width, 1].

        Returns:
            loss: Tensor [batch, pixels], per-pixel cross-entropy.
        '''
        # Map grey levels to class ids by integer division:
        # 0 // 127 = 0, 128 // 127 = 1, 255 // 127 = 2.
        labels = tf.div(tf.cast(labels, tf.int32), 127)
        logits = tf.reshape(logits, [tf.shape(logits)[0], -1, self.num_classes])
        # shape: [batch, all_pixels, num_classes]
        labels = tf.reshape(labels, [tf.shape(labels)[0], -1])
        # shape: [batch, all_pixels]
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name="loss")
        return loss

    def calculate_iou(self, prediction, mask):
        '''
        Mean intersection-over-union (streaming metric) over the batch.

        Args:
            prediction: Tensor, raw unscaled predictions from the network,
                        shape [batch, height, width, self.num_classes].
            mask: Tensor, ground-truth trimap with grey values {0, 128, 255},
                  shape [batch, height, width, 1].

        Returns:
            iou: Tensor, mean IoU accumulated so far.
            update_op: Tensor op that updates the underlying confusion matrix.
        '''
        # BUG FIX: the original cast the mask to tf.int8 before dividing; grey
        # values 128 and 255 overflow int8 (-> -128 and -1) and yield wrong
        # class ids. Use int32, consistent with xentropy_loss().
        mask_ids = tf.div(tf.cast(mask, tf.int32), 127)
        # Squeeze only the channel axis so a batch of size 1 is not collapsed too.
        mask_ids = tf.reshape(tf.squeeze(mask_ids, axis=3), [tf.shape(mask)[0], -1])
        # shape: [batch, all_pixels]
        prediction = tf.reshape(prediction, shape=[tf.shape(prediction)[0], -1, self.num_classes])
        # shape: [batch, all_pixels, num_classes]
        # NOTE: argument order follows the original code (predictions first);
        # IoU is symmetric in the confusion matrix so the result is unchanged.
        iou, update_op = tf.metrics.mean_iou(
            tf.argmax(prediction, axis=2), mask_ids, self.num_classes)
        return iou, update_op

    @staticmethod
    def batch_norm(x, mode, name):
        '''
        Batch-normalization wrapper: updates moving statistics when training,
        uses the learned statistics when inferring.

        Args:
            x: Tensor, the input to normalize.
            mode: 'Training' or 'Testing' (set in __init__).
            name: String, name of the op in the graph.

        Returns:
            x: Batch-normalized input.
        '''
        with tf.variable_scope(name):
            x = tf.contrib.layers.batch_norm(
                x, is_training=(mode == 'Training'), scope=name + '_batch_norm')
        return x

    def conv_layer(self, x, mode, filters, name):
        '''
        Atomic layer of the network: batch norm -> ReLU -> 3x3 conv -> dropout.

        Args:
            x: Tensor, input feature map.
            mode: 'Training' or 'Testing' (set in __init__).
            filters: Integer, number of output feature channels.
            name: String, naming the ops in the graph.

        Returns:
            x: Tensor, result of applying the layer.
        '''
        with tf.name_scope(name):
            x = self.batch_norm(x, mode, name=name + '_bn')
            x = tf.nn.relu(x, name=name + '_relu')
            # tf.layers.conv2d infers the input channel count, unlike tf.nn.conv2d
            # which needs an explicit 4-D filter; this lets the layer accept inputs
            # of any depth.
            x = tf.layers.conv2d(
                x, filters=filters, kernel_size=[3, 3], strides=[1, 1],
                padding='SAME', dilation_rate=[1, 1], activation=None,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                name=name + '_conv3x3')
            x = tf.layers.dropout(
                x, rate=0.2, training=(mode == 'Training'), name=name + '_dropout')
        return x

    def dense_block(self, x, mode, block_nb, name):
        '''
        Dense block: each conv layer produces growth_k feature maps; its input is
        the concatenation of the block input and all previous layer outputs, and
        the block output is the concatenation of the new layer outputs only
        (the block input is NOT included, unlike the classical DenseNet which
        also has explicit bottleneck/transition layers; see
        https://github.com/taki0112/Densenet-Tensorflow/blob/master/MNIST/Densenet_MNIST.py).

        Args:
            x: Tensor, input to the dense block.
            mode: 'Training' or 'Testing' (set in __init__).
            block_nb: Integer, index into self.layers_per_block.
            name: String, identifying the layers in the graph.

        Returns:
            x: Tensor, the output of the dense block
               (layers_per_block[block_nb] * growth_k channels).
        '''
        dense_out = []
        with tf.name_scope(name):
            for i in range(self.layers_per_block[block_nb]):
                conv = self.conv_layer(x, mode, self.growth_k, name=name + '_layer_' + str(i))
                x = tf.concat([conv, x], axis=3)
                dense_out.append(conv)
            x = tf.concat(dense_out, axis=3)
        return x

    def transition_down(self, x, mode, filters, name):
        '''
        Halves the spatial resolution: BN -> ReLU -> 1x1 conv -> dropout -> 2x2 max-pool.

        Args:
            x: Tensor, input to downsample.
            mode: 'Training' or 'Testing' (set in __init__).
            filters: Integer, number of output feature channels of the 1x1 conv.
            name: String, identifying the ops in the graph.

        Returns:
            x: Tensor, result of downsampling.
        '''
        with tf.name_scope(name):
            x = self.batch_norm(x, mode, name=name + '_bn')
            x = tf.nn.relu(x, name=name + '_relu')
            x = tf.layers.conv2d(
                x, filters=filters, kernel_size=[1, 1], strides=[1, 1],
                padding='SAME', dilation_rate=[1, 1], activation=None,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                name=name + '_conv1x1')
            x = tf.layers.dropout(
                x, rate=0.2, training=(mode == 'Training'), name=name + '_dropout')
            x = tf.nn.max_pool(
                x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                padding='SAME', name=name + '_maxpool2x2')
        return x

    def transition_up(self, x, filters, name):
        '''
        Doubles the spatial resolution using a stride-2 transpose convolution.

        Args:
            x: Tensor, input feature map to upsample.
            filters: Integer, number of output feature channels.
            name: String, identifying the ops in the graph.

        Returns:
            x: Tensor, result of upsampling.
        '''
        with tf.name_scope(name):
            x = tf.layers.conv2d_transpose(
                x, filters=filters, kernel_size=[3, 3], strides=[2, 2],
                padding='SAME', activation=None,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                name=name + '_trans_conv3x3')
        return x

    def _build_model(self):
        '''
        Builds the full encoder -> decoder -> prediction graph and stores the
        logits tensor in self.prediction_mask
        (shape [batch, height, width, num_classes]).
        '''
        concats = []
        with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
            x = tf.layers.conv2d(
                self.input_images, filters=48, kernel_size=[3, 3], strides=[1, 1],
                padding='SAME', dilation_rate=[1, 1], activation=None,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                name='first_conv3x3')
            for block_nb in range(0, self.nb_blocks):
                dense = self.dense_block(x, self.mode, block_nb,
                                         'down_dense_block_' + str(block_nb))
                if block_nb != self.nb_blocks - 1:
                    # Keep the skip connection for the decoder, then downsample.
                    x = tf.concat([x, dense], axis=3, name='down_concat_' + str(block_nb))
                    concats.append(x)
                    x = self.transition_down(x, self.mode, x.get_shape()[-1],
                                             name='trans_down_' + str(block_nb))
            # Bottleneck: the last dense block's output (no transition down).
            x = dense

        with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
            # Traverse the blocks in reverse: (i, block_nb) =
            # (0, nb_blocks-1), (1, nb_blocks-2), ...
            for i, block_nb in enumerate(range(self.nb_blocks - 1, 0, -1)):
                x = self.transition_up(x, x.get_shape()[-1], name='trans_up_' + str(block_nb))
                x = tf.concat([x, concats[len(concats) - i - 1]], axis=3,
                              name='up_concat_' + str(block_nb))
                x = self.dense_block(x, self.mode, block_nb, 'up_dense_block_' + str(block_nb))

        with tf.variable_scope('prediction', reuse=tf.AUTO_REUSE):
            x = tf.layers.conv2d(
                x, filters=self.num_classes, kernel_size=[1, 1], strides=[1, 1],
                padding='SAME', dilation_rate=[1, 1], activation=None,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                name='last_conv1x1')
            self.prediction_mask = tf.reshape(
                x, [-1, self.uniform_image_h, self.uniform_image_w, self.num_classes],
                name='prediction_mask')

    def _build_train_op(self):
        '''
        Builds loss, IoU metric, learning-rate schedule, optimizer and summaries.
        Exposes: entropy_loss, iou, iou_update, reset_iou, lrn_rate, global_step,
        train_op.
        '''
        self.global_step = tf.train.get_or_create_global_step()
        self.entropy_loss = tf.reduce_mean(
            self.xentropy_loss(self.prediction_mask, self.gt_trimaps))
        tf.summary.scalar('entropy_loss', self.entropy_loss)

        with tf.variable_scope('mean_iou_train'):
            self.iou, self.iou_update = self.calculate_iou(self.prediction_mask,
                                                           self.gt_trimaps)
        tf.summary.scalar('IoU', self.iou)

        self.lrn_rate = tf.train.exponential_decay(learning_rate=1e-3,
                                                   global_step=self.global_step,
                                                   decay_steps=10000,
                                                   decay_rate=0.98,
                                                   staircase=True)
        tf.summary.scalar('learning_rate', self.lrn_rate)

        optimizer = tf.train.AdamOptimizer(learning_rate=self.lrn_rate)
        # Ensure batch-norm moving statistics are updated with every train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            opt = optimizer.minimize(self.entropy_loss, global_step=self.global_step)

        # The mean_iou confusion-matrix accumulators live in LOCAL_VARIABLES;
        # reset_iou re-initializes them so IoU can be reported per batch.
        running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="mean_iou_train")
        self.reset_iou = tf.variables_initializer(var_list=running_vars)
        train_ops = [opt]
        self.train_op = tf.group(*train_ops)

    def build_graph(self):
        '''Builds the model, the training ops and the merged summaries.'''
        self._build_model()
        self._build_train_op()
        self.merged_summary = tf.summary.merge_all()




def train(sess,ckpt_dir='./net/', fromScratch=False,save_steps=500,mode='Training',training_step=5,batch_size=10):
    '''
    Trains the DenseNet, optionally resuming from the latest checkpoint.

    Args:
        sess: tf.Session to run the graph in.
        ckpt_dir: String, directory for checkpoints and summaries.
        fromScratch: Bool, if True ignore any existing checkpoint.
        save_steps: Integer, checkpoint every save_steps global steps.
        mode: Passed to DenseNet; should be 'Training' here.
        training_step: Integer, number of epochs over the data.
        batch_size: Integer, images per batch.
    '''
    model = DenseNet(mode=mode)
    model.build_graph()
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    saver = tf.train.Saver()  # default to save all saveable objects
    train_writer = tf.summary.FileWriter(ckpt_dir + '/log', sess.graph)
    if not fromScratch:
        ckpt = tf.train.latest_checkpoint(ckpt_dir)
        if ckpt:
            saver.restore(sess, ckpt)
        else:
            print('Cannot find ckpt File, training from scratch...')
    else:
        print('Training from scratch...')

    for ep in range(training_step):
        batch_idxs = len(model.images) // batch_size
        print('epoch=', ep)
        for idx in range(batch_idxs):
            batch_images_dir = model.images[idx * batch_size:(idx + 1) * batch_size]
            batch_labels_dir = model.labels[idx * batch_size:(idx + 1) * batch_size]
            # process_data loads pixels from the path lists; the middle return
            # value (labels) is unused here, only trimaps are needed.
            batch_images, _, batch_trimaps = process_data(batch_images_dir, batch_labels_dir)
            feed = {model.input_images: batch_images, model.gt_trimaps: batch_trimaps}
            # BUG FIX: reset_iou used to be fetched in the same sess.run as
            # iou_update; TF gives no ordering guarantee between fetches, so the
            # reset could race the update and wipe the batch's statistics.
            # Run the reset in its own step first.
            sess.run(model.reset_iou)
            batch_entropy_loss, step, _, _ = sess.run(
                [model.entropy_loss, model.global_step, model.train_op, model.iou_update],
                feed)
            batch_iou = sess.run(model.iou, feed)
            summary_str = sess.run(model.merged_summary, feed)
            print('Training at ', step, 'step, the entropy loss is: ', batch_entropy_loss, ', and the IoU is: ', batch_iou)

            train_writer.add_summary(summary_str, step)

            # save the checkpoint
            if step % save_steps == 1:
                if not os.path.isdir(ckpt_dir):
                    os.mkdir(ckpt_dir)
                saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'), global_step=step)
                print('save the ckpt at ', step, ' step')

 

def infer(sess,image,ckpt_dir='./net/'):
    '''
    Runs inference on one BGR image and returns a grey trimap.

    The image is resized to the nearest power-of-two width/height (the decoder's
    stride-2 up/down sampling needs dimensions divisible by 2 per block), a graph
    of that size is built, the latest checkpoint is restored, and the per-pixel
    argmax is mapped back to grey values {0, 128, 255}.

    Args:
        sess: tf.Session to build and run the graph in.
        image: ndarray [height, width, 3], input image.
        ckpt_dir: String, checkpoint directory.

    Returns:
        pred: ndarray [holder_h, holder_w] of floats in {0., 128., 255.}.
              NOTE(review): the trimap is returned at the resized (power-of-two)
              resolution, not the original image size — callers must resize back.
    '''
    image_height, image_width, image_channel = image.shape
    holder_w = pow(2, round(math.log(image_width, 2)))
    holder_h = pow(2, round(math.log(image_height, 2)))
    image = cv2.resize(image, (holder_w, holder_h))
    model = DenseNet(mode='Testing', image_width=holder_w, image_height=holder_h)
    model._build_model()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    saver = tf.train.Saver()
    saver.restore(sess, ckpt.model_checkpoint_path)
    mask_prediction = sess.run(model.prediction_mask,
                               feed_dict={model.input_images: [image]})
    mask_prediction = mask_prediction[0]
    # Vectorized class-id -> grey-value mapping; replaces the original O(h*w)
    # Python double loop. argmax over 3 channels is always in {0, 1, 2}, so the
    # old unreachable 'else: raise' branch is dropped.
    pixel_cls = np.argmax(mask_prediction, axis=2)
    grey_levels = np.array([0, 128, 255], dtype=np.float64)
    pred = grey_levels[pixel_cls]
    return pred

if __name__ == '__main__':
    # Train the network; the session is closed automatically on exit.
    with tf.Session() as sess:
        train(sess)
    # Inference example (run after training):
    #   image = cv2.imread('./data/images/GT01.png')
    #   pred = infer(sess, image)
    #   cv2.imwrite('pred.png', pred)
    # Quick sanity checks:
    #   net = DenseNet()
    #   print(net.num_classes)
    #   images, labels, trimaps = prepare_data('data')
    #   print(np.shape(images), np.shape(labels), np.shape(trimaps))
    #   loss = net.xentropy_loss(np.divide(images, 127), trimaps)
    #   iou, _ = net.calculate_iou(np.divide(images, 127), trimaps)
    #   sess.run(tf.global_variables_initializer())
    #   print(sess.run(iou))



