# -*- coding: UTF-8 -*-

import numpy as np
import tensorflow as tf
import os,cv2
from data_helper import prepare_data



    



class DenseNet(object):
    def __init__(self, mode='Training'):
        '''
        Initializes the DenseNet based on the specific parameters.

        Args:
            mode: string, currently unused by the visible code; kept for
                interface compatibility.

        Attributes:
            growth_k: int, growth rate of the DenseNet.
            layers_per_block: list of ints, number of layers in each dense block.
            num_classes: int, number of trimap classes to segment (3 here:
                background / unknown / foreground).
            uniform_image_w / uniform_image_h: ints, fixed input size.
                As tf.placeholder needs a fixed-size input, images and labels
                are resized; the size can be changed when inferring.
        '''
        #################################################  hyper-parameters
        self.growth_k = 16
        self.layers_per_block = [2, 3, 3]
        self.num_classes = 3
        self.uniform_image_w = 400
        self.uniform_image_h = 400
        self.predicted_trimaps = None
        # Load the dataset once at construction time.
        images, labels, trimaps = prepare_data(
            'data', self.uniform_image_w, self.uniform_image_h)
        self.images = images
        self.labels = labels
        self.trimaps = trimaps

    def xentropy_loss(self, logits, labels):
        '''
        Calculates the cross-entropy loss over each pixel in the ground
        truth and the prediction.

        Args:
            logits: Tensor, raw unscaled predictions from the network.
                It predicts a 3-channel (num_classes) mask.
            labels: Tensor, the ground truth segmentation mask of shape
                [batch_size, width, height, 1] with grey values in
                {0, 128, 255}.

        Returns:
            loss: the per-pixel cross-entropy loss over each image in
                the batch.
        '''
        # Map grey levels to class ids via integer division:
        # 0 // 127 = 0; 128 // 127 = 1; 255 // 127 = 2.
        labels = tf.div(tf.cast(labels, tf.int32), 127)
        # shape: [batch_size, all_pixels_in_an_image, num_classes]
        logits = tf.reshape(logits, [tf.shape(logits)[0], -1, self.num_classes])
        # shape: [batch_size, all_pixels_in_an_image]
        labels = tf.reshape(labels, [tf.shape(labels)[0], -1])
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name="loss")
        return loss

    def calculate_iou(self, prediction, mask):
        '''
        Calculates the mean intersection over union over the batch.

        Args:
            prediction: Tensor, the raw unscaled prediction from the
                network, reshaped to [batch_size, pixels, num_classes].
            mask: Tensor, the ground truth segmentation mask with grey
                values in {0, 128, 255}, shape [batch_size, width, height, 1].

        Returns:
            iou: Tensor, average IoU over the batch.
            update_op: Tensor op, update operation for the IoU metric.
        '''
        # BUGFIX: cast to int32 BEFORE dividing. The previous tf.int8 cast
        # overflowed for grey values 128 (-> -128) and 255 (-> -1), so the
        # class ids 0/128/255 -> 0/1/2 were corrupted and the IoU was wrong.
        mask_ids = tf.div(tf.cast(mask, tf.int32), 127)
        # Squeeze only the channel axis so a batch of size 1 keeps its rank.
        mask_triple = tf.reshape(
            tf.one_hot(tf.squeeze(mask_ids, axis=-1), depth=self.num_classes),
            [tf.shape(mask)[0], -1, self.num_classes])
        prediction = tf.reshape(
            prediction, shape=[tf.shape(prediction)[0], -1, self.num_classes])
        # tf.metrics.mean_iou expects (labels, predictions, num_classes);
        # pass ground truth first to match the documented signature.
        iou, update_op = tf.metrics.mean_iou(
            tf.argmax(mask_triple, axis=2),
            tf.argmax(prediction, axis=2),
            self.num_classes)
        return iou, update_op

    def build_graph(self):
        '''Builds the model and the train op, then merges all summaries.'''
        self._build_model()
        # NOTE(review): _build_train_op is not defined on this class in the
        # visible source — presumably supplied by a subclass or a missing
        # method; confirm before calling build_graph().
        self._build_train_op()
        self.merged_summary = tf.summary.merge_all()

    def _build_model(self):
        # Network construction is not implemented in this chunk.
        pass

          



if __name__ == '__main__':
    # Quick sanity check: write a synthetic 400x400 gradient image to disk,
    # read it back, and report its maximum pixel value.
    ramp = np.int32(np.arange(160000).reshape(400, 400, 1))
    ramp = np.float64(ramp / 160000)

    cv2.imwrite('im.png', ramp * 255.0)
    loaded = cv2.imread('./im.png')
    print(loaded.max())
