import tensorflow as tf
from sklearn.utils import shuffle
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.training import saver as saver_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
import os
from matplotlib import pyplot as plt
import numpy as np

# Model / checkpoint configuration.
num_classes = 10
# Input images are (batch, 64, 84, 1); labels are one-hot per pixel over num_classes.
inShape = (None, 64, 84, 1)
outShape = (None, 64, 84, num_classes)

checkpoint_version = saver_pb2.SaverDef.V2
# Warm-start from the checkpoint saved at epoch 64 of a previous run.
pre_epoch=64
input_checkpoint = './myFcn_9Layers/fcn9.ckpt-'+str(pre_epoch)
input_graph = './myFcn_9Layers/fcn9.pb'
input_binary = True  # fcn9.pb is a binary (not text-format) GraphDef

def _parse_input_graph_proto(input_graph, input_binary):
    """Load a serialized GraphDef from disk.

    Args:
        input_graph: path to the graph file (.pb binary or .pbtxt text).
        input_binary: True for a binary protobuf, False for text format.

    Returns:
        The parsed graph_pb2.GraphDef, or -1 if the file does not exist
        (kept as-is for backward compatibility with existing callers).
    """
    if not gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        return -1
    input_graph_def = graph_pb2.GraphDef()
    mode = "rb" if input_binary else "r"
    with gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            # Bug fix: the text-format branch previously read nothing and
            # silently returned an empty GraphDef.
            from google.protobuf import text_format
            text_format.Merge(f.read(), input_graph_def)
    return input_graph_def
# Load the pretrained GraphDef and merge it into the default graph (name="")
# so checkpoint variables can be matched to graph tensors by name below.
input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
_ = importer.import_graph_def(input_graph_def, name="")

with session.Session() as sess:
    # Map every variable stored in the checkpoint to the same-named tensor in
    # the imported graph; checkpoint entries with no matching tensor are skipped.
    reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
    var_list = {}
    for name in reader.get_variable_to_shape_map():
        try:
            var_list[name] = sess.graph.get_tensor_by_name(name + ":0")
        except KeyError:
            continue
    saver = saver_lib.Saver(var_list=var_list, write_version=checkpoint_version)
    saver.restore(sess, input_checkpoint)

    # Fetch the pretrained weights/biases as numpy arrays, then wrap each one
    # in a constant initializer so nn() can rebuild its layers from them.
    (W1, B1, W2, B2, W3, B3,
     W4, B4, W5, B5, W6, B6) = sess.run(['conv1/kernel_1:0', 'conv1/bias_1:0',
                                         'conv2/kernel_1:0', 'conv2/bias_1:0',
                                         'conv3/kernel:0', 'conv3/bias:0',
                                         'convT1/kernel:0', 'convT1/bias:0',
                                         'convT2/kernel:0', 'convT2/bias:0',
                                         'conv2d/kernel:0', 'conv2d/bias:0'])
    W1i, W2i, W3i, W4i, W5i, W6i = map(tf.constant_initializer,
                                       (W1, W2, W3, W4, W5, W6))
    B1i, B2i, B3i, B4i, B5i, B6i = map(tf.constant_initializer,
                                       (B1, B2, B3, B4, B5, B6))

def nn(alpha=0.1, beta=0.1):
    """Build the 9-layer FCN segmentation graph.

    Encoder: conv1 -> pool1 -> conv2 -> pool2 -> conv3.
    Decoder: convT1 -> (+ alpha * conv2 features) -> convT2 -> 1x1 conv,
    plus a full-resolution bridge from conv1 scaled by beta.

    Args:
        alpha: weight of the pool-level skip connection (conv2 features).
        beta: weight of the input-resolution bridge (conv1 features).

    Returns:
        Tuple (input_ph, labels_ph, learning_rate_ph, predictions_op,
        cost_op, optimizer_op).
    """
    input_ph = tf.placeholder(dtype=tf.float32, shape=inShape, name='input_image')
    learning_rate_ph = tf.placeholder(dtype=tf.float32, name='learning_rate')

    scale = 2
    # Encoder, initialized from the pretrained checkpoint weights.
    l1_1 = tf.layers.conv2d(input_ph, 16 * scale, 3, 1, 'same', activation=tf.nn.relu, name='conv1', kernel_initializer=W1i, bias_initializer=B1i)

    l1_2 = tf.layers.max_pooling2d(l1_1, 2, 2, name='pool1')

    l1_3 = tf.layers.conv2d(l1_2, 32 * scale, 3, 1, 'same', activation=tf.nn.relu, name='conv2', kernel_initializer=W2i, bias_initializer=B2i)

    l1_4 = tf.layers.max_pooling2d(l1_3, 2, 2, name='pool2')

    l1_5 = tf.layers.conv2d(l1_4, 64 * scale, 3, 1, 'same', activation=tf.nn.relu, name='conv3', kernel_initializer=W3i, bias_initializer=B3i)

    # Decoder with the weighted skip connection from the conv2 feature map.
    l2_1 = tf.layers.conv2d_transpose(l1_5, 32 * scale, 2, 2, activation=tf.nn.relu, name='convT1', kernel_initializer=W4i, bias_initializer=B4i)

    l2_c  = l2_1 + alpha * l1_3
    l2_2 = tf.layers.conv2d_transpose(l2_c, 16 * scale, 2, 2, activation=tf.nn.relu, name='convT2', kernel_initializer=W5i, bias_initializer=B5i)

    # NOTE(review): relu on the class-score layer zeroes negative logits before
    # softmax; kept as-is to stay compatible with the pretrained checkpoint.
    l2_3 = tf.layers.conv2d(l2_2, num_classes, 1, 1, activation=tf.nn.relu, name='conv2d', kernel_initializer=W6i, bias_initializer=B6i)

    l1_1_bridge = tf.layers.conv2d(l1_1, num_classes, 1, 1,  activation=tf.nn.relu, name='conv1_bridge')
    l3_c = l2_3 + beta * l1_1_bridge
    predictions_out_op = tf.identity(tf.nn.softmax(l3_c), name='predictions_softmax')

    # Boolean per-class mask: any class with softmax probability above 0.1.
    predictions_op = predictions_out_op > 0.1

    print(l1_1.shape, l1_2.shape, l1_3.shape, l1_4.shape, l1_5.shape)
    print(l2_1.shape, l2_2.shape, l2_3.shape)

    # Bug fix: the loss was previously computed from l2_3, silently ignoring
    # the beta bridge that predictions_op uses. Train on the same tensor the
    # predictions are made from. (With beta=0, as in all current call sites,
    # l3_c == l2_3, so existing behavior is unchanged.)
    logits = tf.reshape(l3_c, [-1, num_classes], name='logits')
    labels_ph = tf.placeholder(dtype=tf.int32, shape=outShape, name='segmentation_labels')
    labels_flat = tf.reshape(labels_ph, [-1, num_classes])
    cost_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_flat, logits=logits))
    optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ph).minimize(cost_op)
    return input_ph, labels_ph, learning_rate_ph, predictions_op, cost_op, optimizer_op

def get_next_batch(X, y, size=None):
    """Yield consecutive (X, y) mini-batches.

    Args:
        X: indexable features, sliced in lockstep with y.
        y: indexable labels; its length determines the number of batches.
        size: batch size; defaults to the module-level ``batch_size`` so
            existing callers keep their exact behavior.

    Yields:
        (X[start:end], y[start:end]); the last batch may be smaller.
    """
    # Generalization: batch size is now a parameter instead of only a global.
    step = batch_size if size is None else size
    for start in range(0, len(y), step):
        end = min(len(y), start + step)
        yield X[start:end], y[start:end]


def predict(X):
    """Evaluate the current predictions_op on X.

    Adds the trailing channel axis expected by input_ph, then runs the
    thresholded-softmax prediction op in the default session.
    """
    batch = np.expand_dims(X, 3)
    return predictions_op.eval(feed_dict={input_ph: batch})


def eval_Iou(X, y):
    """Mean IoU (percent) of predict() against labels y, averaged over batches.

    Args:
        X: inputs without the channel axis (predict() expands it).
        y: one-hot segmentation labels aligned with X.

    Returns:
        Average over batches of 100 * |intersection| / |union|.
    """
    nb_batches = 0
    total_iou = 0
    for batch_X, batch_y in get_next_batch(X, y):
        preds = predict(batch_X)
        # np.float was removed in NumPy 1.20+; the builtin float is equivalent.
        inter_M = np.logical_and(batch_y, preds).astype(float)
        union_M = np.logical_or(batch_y, preds).astype(float)
        mean_interM = np.mean(inter_M, axis=0)
        mean_unionM = np.mean(union_M, axis=0)
        value_I = np.sum(mean_interM)
        value_U = np.sum(mean_unionM)
        # Bug fix: 'is not 0' was an identity test against the int literal 0,
        # which is always True for a numpy float scalar; compare by value so a
        # truly empty union is skipped instead of dividing by zero.
        if value_U != 0:
            total_iou += value_I / value_U
        nb_batches += 1
    return 100 * total_iou / nb_batches

def mean(a):
    """Return the arithmetic mean of sequence a (undefined for empty input)."""
    return sum(a) / len(a)


# Load the raw dataset: combined.npy holds the images, segmented.npy the
# per-pixel one-hot labels.
X = np.load('../combined.npy')
y = np.load('../segmented.npy')


models_dir = './myFcn_9Layers'
if not os.path.exists(models_dir):
    os.mkdir(models_dir)

# Bug fix: the data used to be split into train/val BEFORE it was shuffled and
# normalized, so X_train/X_val pointed at the raw arrays and the shuffled,
# normalized X/y were never used for training. Normalize and shuffle first,
# then take the 70/30 split. (The unused `indices` array was also removed.)
X = X.astype(np.float32)
y = y.astype(np.float32)
# Normalize pixel values to roughly [-1, 1].
X -= 127.0
X /= 127.0
X, y = shuffle(X, y)

n_train = int(0.7 * len(y))
y_train, X_train, y_val, X_val = y[:n_train], X[:n_train, :], y[n_train:], X[n_train:, :]

batch_size = 4
epoches = 300
learning_rate = 0.1
plt.figure()

# Train the FCN once per skip-connection weight (alpha), recording per-epoch
# cost and IoU curves for both splits; a fresh graph is built for each run.
for aa in [0, 0.03, 0.06, 0.09]:
    tf.reset_default_graph()
    input_ph, labels_ph, learning_rate_ph, predictions_op, cost_op, optimizer_op = nn(alpha=aa, beta=0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Per-epoch aggregates: train/val cost and train/val IoU.
        Ctrn = [];        Cval = [];        Tiou = [];        Viou = []
        for epoch in range(epoches):
            # Per-batch accumulators, reset each epoch.
            ctrn = [];            cval = [];            tiou = [];            viou = [];
            # NOTE(review): the loop variable `y` below shadows the module-level
            # dataset `y`; harmless here since only y_train/y_val are used from
            # this point on, but worth renaming.
            for start in range(0, len(y_train), batch_size):
                end = min(len(y_train), start + batch_size)
                x, y = X_train[start:end], y_train[start:end]
                x_ = np.expand_dims(x, 3)  # add the trailing channel axis
                _, cost_trn = sess.run([optimizer_op, cost_op],
                                       feed_dict={input_ph: x_, labels_ph: y, learning_rate_ph: learning_rate})
                # IoU on the batch that was just used for the gradient step.
                train_Iou = eval_Iou(x, y)
                ctrn += [cost_trn]
                tiou += [train_Iou]
            # Spot-check IoU on the first 100 training samples each epoch.
            train_Iou0 = eval_Iou(X_train[:100], y_train[:100])

            print('train', epoch, train_Iou0)

            # Validation pass: cost only (no optimizer step) plus per-batch IoU.
            for start in range(0, len(y_val), batch_size):
                end = min(len(y_val), start + batch_size)
                x, y = X_val[start:end], y_val[start:end]
                x_ = np.expand_dims(x, 3)
                cost_val = cost_op.eval(feed_dict={input_ph: x_, labels_ph: y})
                val_Iou = eval_Iou(x, y)
                cval += [cost_val];
                viou += [val_Iou]
            val_Iou0 = eval_Iou(X_val[:100], y_val[:100])
            print('val', epoch, val_Iou0)

            Ctrn += [mean(ctrn)];            Cval += [mean(cval)]
            Tiou += [mean(tiou)];            Viou += [mean(viou)]

        # One training-IoU curve per alpha; raw arrays saved for later analysis.
        xl = np.linspace(1, len(Tiou), len(Tiou))
        plt.plot(xl, Tiou, label='trn'+str(aa))
        np.savez('seg_skip_'+str(aa), Ctrn, Cval, Tiou, Viou)

plt.ylabel('Iou')
plt.xlabel("Epoch")
plt.grid()
plt.legend()
plt.savefig(os.path.join(models_dir, '{}.png'.format('mIou')))



