from __future__ import print_function
import os,time, sys, math
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import time, datetime
import argparse
import random
import os, sys, re, gc
import subprocess
import matplotlib
from dataset import get_dataset
matplotlib.use('Agg')

from utils import utils, helpers
from builders import model_builder,frontend_builder

import matplotlib.pyplot as plt

# Semantic classes predicted by the network, one output channel per class.
label_names = ["Ring", "section", "leakwater"]

# Architectures built without a pretrained feature-extractor frontend;
# for these no frontend-restoring init_fn is needed (see train()).
MODELNAMES_WITHOUT_FRONTEND = [
    "FC-DenseNet56", "FC-DenseNet67", "FC-DenseNet103",
    "Encoder-Decoder", "Encoder-Decoder-Skip", "RefineNet",
    "FRRN-A", "FRRN-B", "MobileUNet", "MobileUNet-Skip", "AdapNet",
]

def str2bool(v):
    """Parse a command-line string as a boolean (for argparse ``type=``).

    Accepts the usual yes/no spellings case-insensitively; raises
    argparse.ArgumentTypeError for anything else.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')



# Command-line configuration for the training run.
parser = argparse.ArgumentParser()
parser.add_argument('--num_iters', type=int, default=200000, help='Number of training iterations to run')
parser.add_argument('--learning_rate', type=float, default=0.15, help='Initial learning rate')
parser.add_argument('--num_gpus', type=int, default=4, help='Number of GPUs to shard each batch across')
parser.add_argument('--checkpoint_step', type=int, default=5, help='How often to save checkpoints (epochs)')
parser.add_argument('--validation_step', type=int, default=400, help='How often to perform validation (iterations)')
# BUG FIX: argparse `type=bool` treats ANY non-empty string (including
# "False") as True; str2bool parses yes/no spellings correctly.
parser.add_argument('--update_bn', type=str2bool, default=True, help='Whether to update batch-norm moving statistics during training')
parser.add_argument('--image', type=str, default=None, help='The image you want to predict on. Only valid in "predict" mode.')
parser.add_argument('--continue_training', type=str2bool, default=False, help='Whether to continue training from a checkpoint')
parser.add_argument('--dataset', type=str, default="CamVid", help='Dataset you are using.')
parser.add_argument('--crop_height', type=int, default=512, help='Height of cropped input image to network')
parser.add_argument('--crop_width', type=int, default=512, help='Width of cropped input image to network')
parser.add_argument('--batch_size', type=int, default=64, help='Number of images in each batch')
parser.add_argument('--num_val_images', type=int, default=20, help='The number of images to used for validations')
parser.add_argument('--h_flip', type=str2bool, default=False, help='Whether to randomly flip the image horizontally for data augmentation')
parser.add_argument('--v_flip', type=str2bool, default=False, help='Whether to randomly flip the image vertically for data augmentation')
#parser.add_argument('--brightness', type=float, default=None, help='Whether to randomly change the image brightness for data augmentation. Specifies the max bightness change as a factor between 0.0 and 1.0. For example, 0.1 represents a max brightness change of 10%% (+-).')
#parser.add_argument('--rotation', type=float, default=None, help='Whether to randomly rotate the image for data augmentation. Specifies the max rotation angle in degrees.')
parser.add_argument('--model', type=str, default="DeepLabV3", help='The model you are using. See model_builder.py for supported models')
parser.add_argument('--frontend', type=str, default='ResNet101', help='The frontend you are using. See frontend_builder.py for supported models')
args = parser.parse_args()


def user_loss(output, target):
    """Per-class weighted binary cross-entropy over the spatial axes.

    Foreground and background log-likelihood terms are reduced over axes
    (1, 2); the background term is down-weighted by 0.3. The result is
    averaged over the batch and exposed per class (ring / section /
    leakwater) plus the sum over classes. Every scalar is also added to
    the tf.GraphKeys.LOSSES collection.

    Returns a dict with keys 'total_loss', 'ring_loss', 'section_loss',
    'leak_loss'.
    """
    # Clip before log to avoid -inf at exactly 0 / 1.
    fg_term = -tf.reduce_mean(
        target * tf.log(tf.clip_by_value(output, 1e-10, 1.0)), axis=(1, 2))
    bg_term = -tf.reduce_mean(
        (1 - target) * tf.log(tf.clip_by_value((1 - output), 1e-10, 1.0)), axis=(1, 2))

    # Batch-average, background weighted by 0.3; squeeze to a [num_classes] vector.
    per_class = tf.squeeze(tf.reduce_mean(fg_term + bg_term * 0.3, axis=0))

    losses = {
        'ring_loss': per_class[0],
        'section_loss': per_class[1],
        'leak_loss': per_class[2],
        'total_loss': tf.reduce_mean(tf.reduce_sum(per_class, axis=-1)),
    }
    # Same registration order as before: ring, section, leak, total.
    for key in ('ring_loss', 'section_loss', 'leak_loss', 'total_loss'):
        tf.add_to_collection(tf.GraphKeys.LOSSES, losses[key])
    return losses



def dice_cross_loss(output, target, loss_type='jaccard', smooth=1e-5):
    """Soft dice (Sørensen or Jaccard) coefficient per class.

    Computes the soft dice coefficient between `output` and `target` over
    the spatial axes (1, 2), averages it over the batch, and exposes it per
    class (ring / section / leakwater) plus the sum over classes. Every
    scalar is also added to the tf.GraphKeys.LOSSES collection.

    Parameters
    -----------
    output : Tensor
        Predicted distribution; assumed [batch, H, W, num_classes] from the
        axis=(1, 2) reductions and the 3-way class indexing below — confirm.
    target : Tensor
        The target distribution, same shape as `output`.
    loss_type : str
        ``jaccard`` or ``sorensen``, default is ``jaccard``.
    smooth : float
        Added to numerator and denominator so the ratio is defined when
        either operand is all background; if both are empty the coefficient
        approaches 1, if only one is empty it approaches smooth/(x + smooth).

    Returns
    -------
    dict with keys 'total_loss', 'ring_loss', 'section_loss', 'leak_loss'.

    NOTE(review): these values are the dice *coefficients* (1 = perfect
    overlap), not ``1 - dice``; minimizing them directly would be backwards.
    Confirm intent before using as a training objective (training currently
    goes through user_loss, not this function).
    """
    inse = tf.reduce_sum(output * target, axis=(1, 2))
    if loss_type == 'jaccard':
        l = tf.reduce_sum(output * output, axis=(1, 2))
        r = tf.reduce_sum(target * target, axis=(1, 2))
    elif loss_type == 'sorensen':
        l = tf.reduce_sum(output, axis=(1, 2))
        r = tf.reduce_sum(target, axis=(1, 2))
    else:
        raise Exception("Unknown loss_type")
    # Cleanup: the former background-dice branch (bg_inse / l_1 / r_1 /
    # dice_1) fed nothing downstream and has been removed.
    dice = (2. * inse + smooth) / (l + r + smooth)
    all_dice = tf.squeeze(tf.reduce_mean(dice, axis=0))

    ring_loss = all_dice[0]
    section_loss = all_dice[1]
    leak_loss = all_dice[2]
    total_loss = tf.reduce_mean(tf.reduce_sum(all_dice, axis=-1))

    # Register in the same order as user_loss: ring, section, leak, total.
    for loss in (ring_loss, section_loss, leak_loss, total_loss):
        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

    losses = {}
    losses['total_loss'] = total_loss
    losses['ring_loss'] = ring_loss
    losses['section_loss'] = section_loss
    losses['leak_loss'] = leak_loss
    return losses

def Tower_Loss(scope, image_input, gt_mask):
    """Build the forward pass and losses for one GPU tower.

    Args:
        scope: the tower's name scope (used to collect only this tower's losses).
        image_input: this tower's shard of the input batch.
        gt_mask: this tower's shard of the ground-truth masks.

    Returns:
        The loss dict from user_loss ('total_loss', 'ring_loss',
        'section_loss', 'leak_loss'); also emits one scalar summary per
        loss in the tower's LOSSES collection.
    """
    # Build inference Graph.
    net_output = Scandata_Inference(image_input)
    # get all losses (user_loss registers them in tf.GraphKeys.LOSSES == 'losses')
    out_loss = user_loss(net_output, gt_mask)

    losses = tf.get_collection('losses', scope)

    # Attach a scalar summary to each individual loss so all towers share
    # one summary tag per loss on TensorBoard.
    with tf.device('/cpu:0'):
        for l in losses:
            # BUG FIX: towers are named 'GPU_Tower_<i>' in solve(), so the
            # pattern must match that; the old 'tower_[0-9]*/' pattern was
            # case-sensitive and never matched, leaving per-tower prefixes
            # in the summary names.
            loss_name = re.sub('%s_[0-9]*/' % 'GPU_Tower', '', l.op.name)
            tf.summary.scalar(loss_name, l)
    return out_loss

def Averge_Gradients(tower_grads):
    """Average gradients over all towers.

    Args:
        tower_grads: list (one entry per GPU) of lists of (gradient, variable)
            pairs as returned by Optimizer.compute_gradients. Variables are
            shared across towers.

    Returns:
        A single list of (averaged_gradient, variable) pairs, one per variable.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, v in grad_and_vars:
            # A tower may produce no gradient for a variable it never
            # touches; substitute zeros so the average stays well defined.
            if g is None:
                g = tf.zeros_like(v)
            # Add 0 dimension to the gradients to represent the tower.
            grads.append(tf.expand_dims(g, 0))
        # Average over the tower dimension.
        grad = tf.reduce_mean(tf.concat(axis=0, values=grads), 0)
        # Variables are redundant because they are shared across towers, so
        # the first tower's pointer stands in for all of them.
        # BUG FIX: this append was previously OUTSIDE the loop, so only the
        # LAST variable's averaged gradient was ever returned and all other
        # gradients were silently discarded.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads


def solve(global_step, image_input, gt_mask):
    """Build the multi-GPU training op.

    Splits the batch evenly across ``args.num_gpus`` towers, builds the
    forward pass + loss on each GPU with shared variables, averages the
    per-tower gradients, and applies them with a single Adam step.

    Args:
        global_step: scalar step variable, incremented by apply_gradients.
        image_input: full input batch (split along axis 0 across GPUs).
        gt_mask: full ground-truth mask batch (split the same way).

    Returns:
        (train_op, losses): the grouped update op (gradient step plus, if
        ``args.update_bn``, the batch-norm UPDATE_OPS) and the loss dict
        from the last tower built.
    """
    # learning rate: exponential decay every `decay_steps` steps.
    # 5127 is presumably the training-set size — TODO confirm.
    decay_steps = int(5127 / args.batch_size * 10)
    lr = tf.train.exponential_decay(args.learning_rate,\
            global_step,
            decay_steps,
            0.96,
            staircase=True,
            name='exponential_decay_learning_rate')
    # NOTE(review): epsilon=1.0 is far larger than Adam's 1e-8 default and
    # heavily damps the adaptive step size — confirm it is intentional.
    optimizer = tf.train.AdamOptimizer(\
        lr,
        beta1=0.9,
        beta2=0.999,
        epsilon=1.0)
    tower_gradients = []

    # Shard the batch along the batch axis, one shard per GPU.
    image_inputs = tf.split(image_input, args.num_gpus, axis=0)
    gt_masks = tf.split(gt_mask, args.num_gpus, axis=0)
    #print(image_inputs,gt_masks)
    with tf.variable_scope(tf.get_variable_scope()):
      for i in range(args.num_gpus):
        with tf.device('/gpu:%d' % i):
          with tf.name_scope('%s_%d' % ("GPU_Tower", i)) as scope:
              #out_loss = Tower_Loss(scope, image_input, gt_mask)
              out_loss = Tower_Loss(scope, image_inputs[i], gt_masks[i])
              # update_op = optimizer.minimize(total_loss)
              # Reuse variables so every tower shares the same weights.
              tf.get_variable_scope().reuse_variables()
              variables_to_train = tf.trainable_variables()
              gradients = optimizer.compute_gradients(out_loss["total_loss"], var_list=variables_to_train)
              tower_gradients.append(gradients)
              #print(tower_gradients)

    tf.summary.scalar('learning_rate', lr)
    # compute and apply gradient
    avg_grads = Averge_Gradients(tower_gradients)
    # Add histograms for gradients

    for grad, var in avg_grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    

    update_ops = []

    grad_updates = optimizer.apply_gradients(avg_grads, global_step=global_step)
    update_ops.append(grad_updates)
    
    # update moving mean and variance
    if args.update_bn:
        update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_bn = tf.group(*update_bns)
        update_ops.append(update_bn)

    # out_loss here is from the last tower only; towers share weights but
    # see different data shards.
    return tf.group(*update_ops),out_loss

def Scandata_Inference(net_input, is_training=True):
    """Build the segmentation network for `net_input`.

    Uses the model/frontend and crop size from the command-line args, with
    one output channel per entry of `label_names`. `is_training` is passed
    through to the builder (controls e.g. batch-norm mode).
    """
    return model_builder.build_model(
        model_name=args.model,
        frontend=args.frontend,
        net_input=net_input,
        num_classes=len(label_names),
        crop_width=args.crop_width,
        crop_height=args.crop_height,
        is_training=is_training,
    )


def gpu_validation(validation_element):
    """Run inference on the validation batch, sharded across all GPUs.

    Splits the (image, mask) batch into ``args.num_gpus`` shards, runs the
    weight-shared network on each GPU with is_training=False, and
    re-concatenates predictions and ground-truth masks in the original
    batch order.

    Returns:
        (pred_output, mask_output): concatenated prediction and mask tensors.
    """
    image_input, gt_mask = validation_element
    image_shards = tf.split(image_input, num_or_size_splits=args.num_gpus)
    mask_shards = tf.split(gt_mask, num_or_size_splits=args.num_gpus)
    predictions = []
    masks = []
    with tf.variable_scope(tf.get_variable_scope()):
        for gpu_id, (images, mask_shard) in enumerate(zip(image_shards, mask_shards)):
            with tf.device('/gpu:%d' % gpu_id):
                # Reuse the training weights rather than creating new ones.
                tf.get_variable_scope().reuse_variables()
                predictions.append(Scandata_Inference(images, is_training=False))
                masks.append(mask_shard)
    return tf.concat(predictions, axis=0), tf.concat(masks, axis=0)



    

def plot_ac(epoch, avg_scores_per_epoch, avg_loss_per_epoch, avg_iou_per_epoch):
    """Save accuracy / loss / IoU vs-epoch curves as PNGs in the CWD.

    Args:
        epoch: index of the latest epoch; curves plot epochs 0..epoch.
        avg_scores_per_epoch: per-epoch average validation accuracy values.
        avg_loss_per_epoch: per-epoch average loss values.
        avg_iou_per_epoch: per-epoch average IoU values.

    Writes accuracy_vs_epochs.png, loss_vs_epochs.png and iou_vs_epochs.png.
    """
    curves = [
        (avg_scores_per_epoch, "Average validation accuracy vs epochs",
         "Avg. val. accuracy", 'accuracy_vs_epochs.png'),
        (avg_loss_per_epoch, "Average loss vs epochs",
         "Current loss", 'loss_vs_epochs.png'),
        (avg_iou_per_epoch, "Average IoU vs epochs",
         "Current IoU", 'iou_vs_epochs.png'),
    ]
    for values, title, ylabel, filename in curves:
        fig, ax = plt.subplots(figsize=(11, 8))
        ax.plot(range(epoch + 1), values)
        ax.set_title(title)
        ax.set_xlabel("Epoch")
        ax.set_ylabel(ylabel)
        fig.savefig(filename)
        # BUG FIX: close each figure instead of only clearing it — figures
        # created by plt.subplots() otherwise accumulate across calls (the
        # third one was never even cleared) and leak memory over a long run.
        plt.close(fig)

def train(train_dir,validation_dir):
    """Run the full multi-GPU training loop.

    Args:
        train_dir: path to the training TFRecord file.
        validation_dir: path to the validation TFRecord file.

    Side effects: writes TensorBoard summaries under ./output/<model>/,
    validation scores under ./output/<model>/val_scores/, and checkpoints
    under checkpoints/.
    """
    train_element,train_iter = get_dataset(train_dir,batch_size=args.batch_size,is_training=True,reused=False)
    train_input,train_gt_masks = train_element
    #init_fn, train_output= Scandata_Inference(train_input)
    # Validation pipeline: fixed batch of 40, single epoch, no augmentation.
    validation_element,validation_iter = get_dataset(validation_dir,batch_size=40,epoch=1,is_training=False)
    


    global_step = slim.create_global_step()
    update_op,losses = solve(global_step, train_input, train_gt_masks)
    pred_output,mask_output = gpu_validation(validation_element)

    # Models without a pretrained frontend need no weight-restoring init_fn.
    if args.model in MODELNAMES_WITHOUT_FRONTEND:
        init_fn = None
    else:
        init_fn = frontend_builder.build_initfn(args.frontend)


    init_op = tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
            )
    # NOTE(review): this ConfigProto (allow_growth) is built but never passed
    # to the Session below — only the GPUOptions config is used. Dead code?
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    
    sess.run(init_op)

    summary_op = tf.summary.merge_all()
    # One timestamped log directory per run, grouped by model name.
    logdir = os.path.join("./output/"+args.model+"/", time.strftime('%Y%m%d%H%M%S', time.gmtime()))
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    summary_writer = tf.summary.FileWriter(logdir, graph=sess.graph)
    
    saver=tf.train.Saver(max_to_keep=1000)
    #sess.run(tf.global_variables_initializer())


    # Restore pretrained frontend weights AFTER the global initializer so
    # they are not overwritten by it.
    if init_fn is not None:
        init_fn(sess)
    # Load a previous checkpoint if desire
    # NOTE(review): the 'checkpoints/' directory is never created here;
    # saver.save below fails if it does not exist — confirm it is created
    # elsewhere before a long run.
    model_checkpoint_name = "checkpoints/latest_model_" + \
        args.model + "_" + args.dataset + ".ckpt"
    if args.continue_training:
        print('Loaded latest model checkpoint')
        saver.restore(sess, model_checkpoint_name)
        
    average_time = []

    print("start training...")

    # Do the training here
    for step in range(args.num_iters):
        st = datetime.datetime.now()
        # Do the training
        #print(update_op)
        _,total_loss,ring_loss,section_loss,leak_loss =\
                sess.run([update_op, losses["total_loss"],losses['ring_loss'],losses['section_loss'],losses['leak_loss']])
        batch_time = datetime.datetime.now() - st 
        # The "Epoch = 32" field below is a hard-coded placeholder, not a
        # real epoch counter.
        string_print = "Epoch = %d Count = %d Total_Loss = %.4f Ring_Loss = %.4f Section_loss = %.4f Leak_loss = %.4f Time = %.2f" % (
                32, step, total_loss, ring_loss, section_loss, leak_loss, batch_time.total_seconds())
        print(string_print)
        average_time.append(batch_time.total_seconds())
        # tf.summary.scalar("batch_duration", batch_time.total_seconds())
        # """
        if step % args.validation_step == 0 and step!=0:
            print("Performing validation.....")
            sess.run(validation_iter.initializer)
            u_metric = utils.user_evaluation(label_names)
            # Evaluate a fixed 3 validation batches (3 * 40 = 120 images).
            for i in range(3):
                validation_pred,validation_gt_mask = sess.run([pred_output,mask_output])
                # Binarize predictions at 0.5.
                validation_pred = np.where(validation_pred < 0.5, 0.0,1.0)
                u_metric.add_metric_list(validation_pred,validation_gt_mask)
            avg = u_metric.average
            u_metric.save("./output/%s/val_scores/" % args.model, 'no%d'%step,average=True)
            for name in label_names:
                avg_n = avg[name]
                print("\nAverage validation accuracy = %f" %( avg_n["accuracy"]))
                print("Average per class validation accuracies for epoch #:", avg_n["class_accuracies"])
                print("Validation precision = ", avg_n["precision"])
                print("Validation recall = ", avg_n["recall"])
                print("Validation F1 score = ", avg_n["f1"])
                print("Validation IoU score = ", avg_n["iou"])
            del u_metric
            del avg_n
            gc.collect()
        #"""

        # Every 50 steps: flush summaries and report an ETA computed from
        # the mean per-step wall time since the last report.
        if step % 50 == 0 and step!=0:
            summary_str = sess.run(summary_op)
            summary_writer.add_summary(summary_str, step)
            summary_writer.flush()
            if step != 0:
                average_time_value = np.mean(average_time)
                remain_time = average_time_value*(args.num_iters-step)
                m, s = divmod(remain_time, 60)
                h, m = divmod(m, 60)
                train_time = "Remaining training time = %d hours %d minutes %d seconds\n" % (
                    h, m, s)
            else:
                # Unreachable: the enclosing branch already requires step != 0.
                train_time = "Remaining training time : Training completed.\n"
            del average_time
            gc.collect()
            average_time = []
            print(train_time)
            #average_time = average_time.tolist()

        # Save latest checkpoint to same file name
        if (step % 40000 == 0 or step + 1 == args.num_iters)  and step!=0:
            print("Saving latest checkpoint")
            saver.save(sess, model_checkpoint_name, global_step=step)

if __name__ == "__main__":
    # Hard-coded TFRecord locations for the tunnel-scan segmentation dataset.
    training_records = os.path.abspath("/home/ubuntu/桌面/segmentation/train_data.tfrecords")
    validation_records = os.path.abspath("/home/ubuntu/桌面/segmentation/test_data.tfrecords")
    # Build the graph under a CPU device scope; solve()/gpu_validation()
    # place the per-tower compute on the GPUs explicitly.
    with tf.Graph().as_default(), tf.device("/cpu:0"):
        train(training_records, validation_records)