import os
from datetime import datetime

import numpy as np
from scipy.misc import imsave
from sklearn.metrics import roc_auc_score
import tensorflow as tf
import tensorlayer as tl


class Model:
    """TF1 graph-mode U-Net for binary image segmentation (512x512 assumed).

    All methods follow the file's original "no ``self``" convention and are
    intended to be called unbound, e.g. ``Model.predict(X, Y, args)``.
    """

    def predict(X, Y, args):
        """Build the U-Net graph, the dice loss, and the Adam training op.

        Args:
            X: 4-D NHWC input image tensor/placeholder.
            Y: ground-truth mask tensor matching the network output shape.
            args: namespace providing ``keep_prob`` and ``learning_rate``.

        Returns:
            (training_op, loss): the Adam ``minimize`` op and the scalar
            ``1 - dice_coefficient`` loss.
        """
        initializer = tf.contrib.layers.xavier_initializer_conv2d()

        # ================== Contracting Path ===================
        with tf.name_scope('u_net'):
            # NOTE(review): the first conv is left unnamed (TF auto-names it)
            # so variable names stay compatible with checkpoints saved by the
            # original graph; renaming it would break tf.train.Saver.restore.
            conv1 = tf.layers.conv2d(inputs=X, filters=64, kernel_size=(3, 3), padding='same',
                                     activation=tf.nn.relu, kernel_initializer=initializer)
            conv1 = tf.layers.conv2d(conv1, 64, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv1.2', kernel_initializer=initializer)
            drop1 = tf.nn.dropout(conv1, keep_prob=args.keep_prob, name='drop1')
            max_pool1 = tf.layers.max_pooling2d(drop1, pool_size=(2, 2), strides=(2, 2), padding='same',
                                                name='max_pool1')

            conv2 = tf.layers.conv2d(max_pool1, 128, kernel_size=(3, 3), padding='same',
                                     activation=tf.nn.relu, name='conv2.1', kernel_initializer=initializer)
            conv2 = tf.layers.conv2d(conv2, 128, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv2.2', kernel_initializer=initializer)
            drop2 = tf.nn.dropout(conv2, keep_prob=args.keep_prob, name='drop2')
            max_pool2 = tf.layers.max_pooling2d(drop2, pool_size=(2, 2), strides=(2, 2), padding='same',
                                                name='max_pool2')

            conv3 = tf.layers.conv2d(max_pool2, 256, kernel_size=(3, 3), padding='same',
                                     activation=tf.nn.relu, name='conv3.1', kernel_initializer=initializer)
            conv3 = tf.layers.conv2d(conv3, 256, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv3.2', kernel_initializer=initializer)
            drop3 = tf.nn.dropout(conv3, keep_prob=args.keep_prob, name='drop3')
            max_pool3 = tf.layers.max_pooling2d(drop3, pool_size=(2, 2), strides=(2, 2), padding='same',
                                                name='max_pool3')

            conv4 = tf.layers.conv2d(max_pool3, 512, kernel_size=(3, 3), padding='same',
                                     activation=tf.nn.relu, name='conv4.1', kernel_initializer=initializer)
            conv4 = tf.layers.conv2d(conv4, 512, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv4.2', kernel_initializer=initializer)
            drop4 = tf.nn.dropout(conv4, keep_prob=args.keep_prob, name='drop4')
            max_pool4 = tf.layers.max_pooling2d(drop4, pool_size=(2, 2), strides=(2, 2), padding='same',
                                                name='max_pool4')

            # Bottleneck.
            conv5 = tf.layers.conv2d(max_pool4, 1024, kernel_size=(3, 3), padding='same',
                                     activation=tf.nn.relu, name='conv5.1', kernel_initializer=initializer)
            conv5 = tf.layers.conv2d(conv5, 1024, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv5.2', kernel_initializer=initializer)
            drop5 = tf.nn.dropout(conv5, keep_prob=args.keep_prob, name='drop5')

            # ================== Expanding Path ===================
            # Each step: 2x2 transposed conv upsample, skip-concat with the
            # matching contracting feature map on the channel axis, two convs.
            deconv1 = tf.layers.conv2d_transpose(drop5, 512, kernel_size=(2, 2), strides=(2, 2),
                                                 padding='same', activation=tf.nn.relu,
                                                 name='upconv1.1', kernel_initializer=initializer)
            deconv1 = tf.concat([deconv1, conv4], 3, name='concat1')
            conv6 = tf.layers.conv2d(deconv1, 512, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv6.1', kernel_initializer=initializer)
            conv6 = tf.layers.conv2d(conv6, 512, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv6.2', kernel_initializer=initializer)
            drop6 = tf.nn.dropout(conv6, keep_prob=args.keep_prob, name='drop6')

            deconv2 = tf.layers.conv2d_transpose(drop6, 256, kernel_size=(2, 2), strides=(2, 2),
                                                 padding='same', activation=tf.nn.relu,
                                                 name='upconv2.1', kernel_initializer=initializer)
            deconv2 = tf.concat([deconv2, conv3], 3, name='concat2')
            conv7 = tf.layers.conv2d(deconv2, 256, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv7.1', kernel_initializer=initializer)
            conv7 = tf.layers.conv2d(conv7, 256, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv7.2', kernel_initializer=initializer)
            drop7 = tf.nn.dropout(conv7, keep_prob=args.keep_prob, name='drop7')

            deconv3 = tf.layers.conv2d_transpose(drop7, 128, kernel_size=(2, 2), strides=(2, 2),
                                                 padding='same', activation=tf.nn.relu,
                                                 name='upconv3.1', kernel_initializer=initializer)
            deconv3 = tf.concat([deconv3, conv2], 3, name='concat3')
            conv8 = tf.layers.conv2d(deconv3, 128, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv8.1', kernel_initializer=initializer)
            conv8 = tf.layers.conv2d(conv8, 128, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv8.2', kernel_initializer=initializer)
            drop8 = tf.nn.dropout(conv8, keep_prob=args.keep_prob, name='drop8')

            deconv4 = tf.layers.conv2d_transpose(drop8, 64, kernel_size=(2, 2), strides=(2, 2),
                                                 padding='same', activation=tf.nn.relu,
                                                 name='upconv4.1', kernel_initializer=initializer)
            deconv4 = tf.concat([deconv4, conv1], 3, name='concat4')
            conv9 = tf.layers.conv2d(deconv4, 64, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv9.1', kernel_initializer=initializer)
            conv9 = tf.layers.conv2d(conv9, 64, kernel_size=(3, 3), padding='same', activation=tf.nn.relu,
                                     name='conv9.2', kernel_initializer=initializer)
            drop9 = tf.nn.dropout(conv9, keep_prob=args.keep_prob, name='drop9')

            # NOTE(review): despite the op name, 'logits' holds sigmoid
            # probabilities in [0, 1]; the name is kept for checkpoint and
            # graph-name compatibility.
            logits = tf.layers.conv2d(drop9, 1, kernel_size=(1, 1), padding='same', activation=tf.nn.sigmoid,
                                      name='logits', kernel_initializer=initializer)

        with tf.name_scope('loss'):
            # Soft dice loss over all axes; dice_coe expects probabilities,
            # which is why the final layer applies a sigmoid itself.
            loss = 1 - tl.cost.dice_coe(logits, Y, axis=[0, 1, 2, 3])
            loss = tf.reduce_mean(loss, name='loss')

        with tf.name_scope('train'):
            optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
            training_op = optimizer.minimize(loss)

        return training_op, loss

    def infer(args, X, logits, X_test, y_test):
        """Evaluate a restored model on the test set.

        Fix vs. original: ``args``, ``X``, ``logits``, ``X_test`` and
        ``y_test`` were undefined free names, and ``load``/``data_provider``
        were called as bare names (``data_provider`` additionally with the
        wrong arity); all are now explicit parameters / qualified calls.

        Args:
            args: namespace with ``batch_size``, ``load_checkpoint`` and
                ``saved_model_directory``.
            X: the input placeholder fed at session-run time.
            logits: the network's sigmoid-probability output tensor.
            X_test, y_test: 4-D test images and masks.

        Prints the mean Dice score and mean ROC-AUC over the test batches.
        """
        sess = tf.Session()
        Model.load(sess, args, args.load_checkpoint)
        dice_list = []
        roc_list = []
        for iteration in range(X_test.shape[0] // args.batch_size):
            X_batch, y_batch = Model.data_provider(args, X_test, y_test, iteration)
            output = sess.run(logits, feed_dict={X: X_batch})
            output_mask = np.squeeze((output > 0.5).astype(np.float32))
            y_batch_mask = (np.squeeze(y_batch) > 0.5).astype(np.int64)
            # Dice = 2*|A∩B| / (|A| + |B|); NaN when both masks are empty.
            intersection = np.sum(output_mask[y_batch_mask == 1])
            dice_list.append(intersection * 2.0 / (np.sum(output_mask) + np.sum(y_batch_mask)))
            roc_list.append(roc_auc_score(y_true=y_batch_mask.flatten(),
                                          y_score=np.squeeze(output).flatten()))
            # imsave('../data/output/{}.png'.format(iteration), np.squeeze(output))
            # imsave('../data/output/{}_anno.png'.format(iteration), np.squeeze(y_batch))
        # Guard against a test set smaller than one batch (empty lists).
        if dice_list:
            print((sum(dice_list) / len(dice_list)), sum(roc_list) / len(roc_list))
        else:
            print('[!] No complete batch in X_test; nothing evaluated.')

    def data_provider(args, X, y, iteration):
        """Return the ``iteration``-th mini-batch of (X, y).

        Slicing is rank-agnostic (generalized from the hard-coded 4-D
        version); results are identical for 4-D inputs.
        """
        begin = args.batch_size * iteration
        end = begin + args.batch_size
        return X[begin:end], y[begin:end]

    def unison_shuffled_copies(X, y):
        """Shuffle X and y along axis 0 with one shared random permutation,
        preserving the pairing between samples and labels."""
        p = np.random.permutation(X.shape[0])
        return X[p], y[p]

    def save(epoch, sess, args):
        """Save a timestamped checkpoint under ``args.saved_model_directory``.

        NOTE(review): ':' in the timestamp is illegal in Windows filenames;
        the format is kept to match existing checkpoint names.
        """
        print('[*] Saving checkpoint ....')
        model_name = 'model_{}_epoch_{}.ckpt'.format(datetime.now().strftime("%d:%H:%M:%S"), epoch)
        saver = tf.train.Saver()
        save_path = saver.save(sess, os.path.join(args.saved_model_directory, model_name))
        print('[*] Checkpoint saved in file {}'.format(save_path))

    def load(sess, args, model_name):
        """Restore checkpoint ``model_name`` into ``sess``.

        Fix vs. original: ``sess`` and ``args`` were undefined free names
        (guaranteed NameError); they are now explicit parameters.
        """
        print(" [*] Loading checkpoint...")
        saver = tf.train.Saver()
        saver.restore(sess, os.path.join(args.saved_model_directory, model_name))