import tensorflow as tf
from sklearn.utils import shuffle
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.training import saver as saver_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
import os


# Reset the default graph once before importing the pretrained MNIST net.
# (The original called reset_default_graph() a second time at this point,
# with nothing added to the graph in between — a pure no-op, removed.)
tf.reset_default_graph()

num_classes = 10
# (batch, height, width, channels) of the grayscale input images.
inShape = (None, 64, 84, 1)
# Per-pixel one-hot segmentation target shape.
outShape = (None, 64, 84, num_classes)

# Pretrained MNIST conv-net artifacts used to seed conv1/conv2 below.
checkpoint_version = saver_pb2.SaverDef.V2
input_checkpoint = '../checkpoints_9/mnist.ckpt-75'
input_graph = '../checkpoints_9/mnist.pb'
input_binary = True

def _parse_input_graph_proto(input_graph, input_binary):
    """Load a serialized GraphDef from *input_graph*.

    Args:
        input_graph: path to the .pb (binary) or .pbtxt (text) file.
        input_binary: True for binary protobuf, False for text format.

    Returns:
        The parsed graph_pb2.GraphDef.

    Raises:
        IOError: if the file does not exist.
    """
    if not gfile.Exists(input_graph):
        # The original printed a message and returned -1, which the caller
        # then fed straight into import_graph_def and crashed with a
        # confusing error; fail fast with a clear one instead.
        raise IOError("Input graph file '" + input_graph + "' does not exist!")
    input_graph_def = graph_pb2.GraphDef()
    mode = "rb" if input_binary else "r"
    with gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            # BUG FIX: the original silently returned an EMPTY GraphDef for
            # text-format files (it opened the file but never parsed it).
            # text_format ships with protobuf, a hard dependency of the
            # tensorflow package this file already imports.
            from google.protobuf import text_format
            text_format.Merge(f.read(), input_graph_def)
    return input_graph_def
# Parse the serialized GraphDef from disk and import it into the current
# default graph so checkpoint variable names can be resolved below.
input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
_ = importer.import_graph_def(input_graph_def, name="")

# Pull the trained conv1/conv2 weights out of the old checkpoint so they can
# seed the corresponding layers of the new segmentation graph built below.
with session.Session() as sess:
    var_list = {}
    # Enumerate every variable stored in the checkpoint file.
    reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
    var_to_shape_map = reader.get_variable_to_shape_map()
    for key in var_to_shape_map:
        try:
            # Keep only checkpoint entries that exist in the imported graph.
            tensor = sess.graph.get_tensor_by_name(key + ":0")
        except KeyError:
            continue
        var_list[key] = tensor
    saver = saver_lib.Saver(var_list=var_list, write_version=checkpoint_version)
    saver.restore(sess, input_checkpoint)
    # Materialize the first two conv layers' kernels and biases as numpy
    # arrays so they survive after this session (and graph) is discarded.
    W1, B1, W2, B2 = sess.run(['conv1/kernel:0', 'conv1/bias:0', \
                     'conv2/kernel:0', 'conv2/bias:0', \
                     ])
    # Wrap them as constant initializers for the layers defined further down.
    W1i = tf.constant_initializer(W1);B1i = tf.constant_initializer(B1)
    W2i = tf.constant_initializer(W2);B2i = tf.constant_initializer(B2)

# ---- Build the FCN segmentation graph (conv encoder + transposed-conv decoder) ----
input_ph = tf.placeholder(dtype=tf.float32, shape=inShape, name='input_image')
# NOTE(review): `labels` is never referenced below — the loss reads from
# `labels_ph` instead; presumably a leftover. Confirm before removing.
labels = tf.placeholder(dtype=tf.int32, shape=(None, num_classes), name='labels')
learning_rate_ph = tf.placeholder(dtype=tf.float32, name='learning_rate')

# Channel-width multiplier applied to every conv layer.
scale = 2
# Encoder: conv1/conv2 are seeded from the pretrained MNIST checkpoint above.
l1_1 = tf.layers.conv2d(input_ph, 16 * scale, 3, 1, 'same', activation=tf.nn.relu, name='conv1', kernel_initializer=W1i, bias_initializer=B1i)
l1_2 = tf.layers.max_pooling2d(l1_1, 2, 2, name='pool1')
l1_3 = tf.layers.conv2d(l1_2, 32 * scale, 3, 1, 'same', activation=tf.nn.relu, name='conv2', kernel_initializer=W2i, bias_initializer=B2i)
l1_4 = tf.layers.max_pooling2d(l1_3, 2, 2, name='pool2')
l1_5 = tf.layers.conv2d(l1_4, 64 * scale, 3, 1, 'same', activation=tf.nn.relu, name='conv3')
# Decoder: two stride-2 transposed convs restore the input resolution.
l2_1 = tf.layers.conv2d_transpose(l1_5, 32 * scale, 2, 2, activation=tf.nn.relu, name='convT1')
l2_2 = tf.layers.conv2d_transpose(l2_1, 16 * scale, 2, 2, activation=tf.nn.relu, name='convT2')
# 1x1 conv maps decoder features to per-pixel class scores.
l2_3 = tf.layers.conv2d(l2_2, num_classes, 1, 1, activation=tf.nn.relu, name='conv2d')
predictions_out_op = tf.identity(tf.nn.softmax(l2_3), name='predictions_softmax')
# Boolean mask: a class counts as predicted where its softmax exceeds 0.1.
predictions_op = predictions_out_op > 0.1

print(l1_1.shape, l1_2.shape, l1_3.shape, l1_4.shape, l1_5.shape)
print(l2_1.shape, l2_2.shape, l2_3.shape)

# Flatten scores and one-hot masks to (pixels, classes) for the loss.
logits = tf.reshape(l2_3, [-1, num_classes], name='logits')
labels_ph = tf.placeholder(dtype=tf.int32, shape=outShape, name='segmentation_labels')
labels_flat = tf.reshape(labels_ph, [-1, num_classes])
cost_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_flat, logits=logits))
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ph).minimize(cost_op)

def get_next_batch(X, y, batch_size=None):
    """Yield successive (X, y) mini-batches.

    Args:
        X: indexable inputs with the same leading length as `y`.
        y: indexable targets.
        batch_size: mini-batch length. Defaults to the module-level
            `batch_size` when omitted, keeping the old call sites working.

    Yields:
        (X[start:end], y[start:end]) pairs; the last batch may be shorter.
    """
    if batch_size is None:
        # Backward-compatible fallback: the original read the script-level
        # hyperparameter implicitly.
        batch_size = globals()['batch_size']
    for start in range(0, len(y), batch_size):
        # Slicing already clamps at the sequence end, so no min() is needed.
        yield X[start:start + batch_size], y[start:start + batch_size]

def predict(X):
    """Evaluate the thresholded-softmax predictions for a batch of 2-D images."""
    # The graph expects NHWC input, so append the trailing channel axis.
    batch = np.expand_dims(X, 3)
    return predictions_op.eval(feed_dict={input_ph: batch})

def eval_Iou(X, y):
    """Return the mean intersection-over-union (in percent) over mini-batches.

    Args:
        X: input images, shape (N, H, W).
        y: per-pixel one-hot label masks aligned with `X`.

    Returns:
        100 * average over batches of (sum of mean intersection mask /
        sum of mean union mask); 0.0 for empty input.
    """
    nb_batches = 0
    total_iou = 0.0
    for batch_X, batch_y in get_next_batch(X, y):
        preds = predict(batch_X)
        # BUG FIX: `np.float` was removed in NumPy 1.20 — use the builtin.
        inter_M = np.logical_and(batch_y, preds).astype(float)
        union_M = np.logical_or(batch_y, preds).astype(float)
        mean_interM = np.mean(inter_M, axis=0)
        mean_unionM = np.mean(union_M, axis=0)
        value_I = np.sum(mean_interM)
        value_U = np.sum(mean_unionM)
        # BUG FIX: `value_U is not 0` tested object identity against the int
        # literal 0 (always True for a numpy float); compare by value.
        if value_U != 0:
            total_iou += value_I / value_U
        nb_batches += 1
    # Guard against empty input rather than dividing by zero.
    if nb_batches == 0:
        return 0.0
    return 100 * total_iou / nb_batches

def mean(a):
    """Arithmetic mean of a non-empty sequence, always returned as a float."""
    return float(sum(a)) / len(a)

import numpy as np

# Load the raw dataset: input images and per-pixel one-hot label masks.
X = np.load('../combined.npy')
y = np.load('../segmented.npy')


models_dir = './myFcn_9Layers'
if not os.path.exists(models_dir):
    os.mkdir(models_dir)

# BUG FIX: shuffle and normalize BEFORE splitting. The original sliced
# X_train/X_val first; astype() then rebound X to a new array, so the
# train/val slices kept pointing at the raw (unshuffled, un-normalized)
# data and the training loop never saw the normalization below.
X, y = shuffle(X, y)
X = X.astype(np.float32)
y = y.astype(np.float32)
# Normalize pixel values to roughly [-1, 1].
X -= 127.0
X /= 127.0

indices = np.arange(0, len(X), 1)
n_train = int(0.7 * len(y))  # 70/30 train/validation split
y_train, X_train, y_val, X_val = y[: n_train], X[: n_train, :], y[n_train:], X[n_train:, :]

# Training hyperparameters.
batch_size = 4
epoches = 500
learning_rate = 0.001

# Per-epoch history: training/validation cost (C*) and IoU (*iou).
Ctrn = [];
Cval = [];
Tiou = [];
Viou = []
# NOTE(review): a/a1/a2 are never used below — presumably leftovers from a
# single-image prediction experiment; confirm before removing.
a = X_val[0]
a1 = tf.expand_dims(a, 0)
a2 = tf.expand_dims(a1, 3)


# ---- Training loop ----
with tf.Session() as sess:
    sess.run(tf.variables_initializer(tf.global_variables()))
    for epoch in range(epoches):
        ctrn, cval, tiou, viou = [], [], [], []

        # One optimization pass over the training set.
        for start in range(0, len(y_train), batch_size):
            end = min(len(y_train), start + batch_size)
            # BUG FIX: the original unpacked into `x, y`, silently clobbering
            # the module-level dataset array `y`; use dedicated batch names.
            batch_x, batch_y = X_train[start:end], y_train[start:end]
            batch_x_ = np.expand_dims(batch_x, 3)  # add the channel axis
            _, cost_trn = sess.run(
                [optimizer_op, cost_op],
                feed_dict={input_ph: batch_x_, labels_ph: batch_y,
                           learning_rate_ph: learning_rate})
            ctrn.append(cost_trn)
            tiou.append(eval_Iou(batch_x, batch_y))
        train_Iou0 = eval_Iou(X_train[:100], y_train[:100])

        print('train', epoch, train_Iou0)

        # Validation pass: cost and IoU only, no optimizer step.
        for start in range(0, len(y_val), batch_size):
            end = min(len(y_val), start + batch_size)
            batch_x, batch_y = X_val[start:end], y_val[start:end]
            batch_x_ = np.expand_dims(batch_x, 3)
            cval.append(cost_op.eval(feed_dict={input_ph: batch_x_, labels_ph: batch_y}))
            viou.append(eval_Iou(batch_x, batch_y))
        val_Iou0 = eval_Iou(X_val[:100], y_val[:100])
        print('val', epoch, val_Iou0)

        # Early stop: once validation IoU clears the threshold, export the
        # graph (text + binary) plus a checkpoint and stop training.
        threshold = 10
        if val_Iou0 > threshold:
            graph_def = tf.get_default_graph().as_graph_def()
            tf.train.write_graph(graph_def, './myFcn_9Layers/', 'fcn9.pbtxt', as_text=True)
            tf.train.write_graph(graph_def, './myFcn_9Layers/', 'fcn9.pb', as_text=False)
            saver = tf.train.Saver()
            saver.save(sess, './myFcn_9Layers/fcn9.ckpt', epoch + 1)
            break

        Ctrn.append(mean(ctrn))
        Cval.append(mean(cval))
        Tiou.append(mean(tiou))
        Viou.append(mean(viou))
# BUG FIX: np.savez always writes NPZ and appends '.npz' when missing, so the
# original 'Loss.npy' actually produced a file named 'Loss.npy.npz'. Use the
# honest extension.
np.savez('Loss.npz', Ctrn=Ctrn, Cval=Cval)
np.savez('Iou.npz', Tiou=Tiou, Viou=Viou)

import os
import matplotlib

matplotlib.use('Agg')  # headless backend: render straight to files
from matplotlib import pyplot as plt

plt_folder = models_dir

# IoU curves (train vs. validation) per epoch.
plt.figure()
xl = np.linspace(1, len(Tiou), len(Tiou))
plt.plot(xl, Tiou, xl, Viou)
plt.ylabel('Iou')
plt.xlabel("Epoch")
# BUG FIX: enable the grid BEFORE saving — the original called grid() after
# savefig(), so the saved PNG had no grid.
plt.grid()
plt.savefig(os.path.join(plt_folder, '{}.png'.format('Iou')))
plt.close()

# Loss curves (train vs. validation) per epoch.
plt.figure()
plt.plot(xl, Ctrn, xl, Cval)
plt.xlabel("Epoch")
plt.ylabel('Loss')
plt.grid()
plt.savefig(os.path.join(plt_folder, '{}.png'.format('Loss')))
plt.close()


