import tensorflow as tf
from sklearn.utils import shuffle
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.training import saver as saver_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
import os
import numpy as np

# Restore-and-extend setup: load a frozen MNIST-segmentation graph and its
# checkpoint, then rebuild an FCN whose conv1 is seeded from the restored
# weights (see blocks below).
checkpoint_version = saver_pb2.SaverDef.V2
input_checkpoint = './checkpoints_5/mnist.ckpt-75'
tf.reset_default_graph()
input_graph = './checkpoints_5/mnist.pb'
input_binary = True  # the .pb above is a binary-serialized GraphDef

num_classes = 10
# Learning-rate feed for the Adam optimizer constructed further below.
lr_ph = tf.placeholder(dtype=tf.float32, name='learning_rate')
inShape = (None, 64, 84, 1)  # NHWC: 64x84 single-channel input images
outShape = (None, 64, 84, num_classes)  # per-pixel one-hot segmentation labels

input_ph = tf.placeholder(dtype=tf.float32, shape=inShape, name='input_image')
# NOTE(review): `labels` is never fed or read below (labels_ph is used for
# training instead) — looks like dead graph state; confirm before removing.
labels = tf.placeholder(dtype=tf.int32, shape=(None, num_classes), name='labels')
# NOTE(review): requests the same name as lr_ph ('learning_rate'); TF will
# uniquify the node to 'learning_rate_1'. This placeholder is unused below.
learning_rate_ph = tf.placeholder(dtype=tf.float32, name='learning_rate')

def _parse_input_graph_proto(input_graph, input_binary):
    """Deserialize a GraphDef proto from `input_graph`.

    Args:
        input_graph: path to a serialized GraphDef file.
        input_binary: True if the file is a binary proto. Text-format
            graphs are not supported by this helper.

    Returns:
        The parsed GraphDef, or -1 when the file does not exist (kept for
        backward compatibility with the original contract).

    Raises:
        NotImplementedError: if input_binary is False.
    """
    if not gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        return -1
    input_graph_def = graph_pb2.GraphDef()
    mode = "rb" if input_binary else "r"
    with gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            # BUG FIX: the original silently returned an EMPTY GraphDef for
            # text-format files, so the importer would import nothing. Fail
            # loudly instead. (To support text graphs, parse with
            # google.protobuf.text_format.Merge as TF's freeze_graph does.)
            raise NotImplementedError(
                "text-format GraphDef parsing is not implemented; "
                "pass input_binary=True")
    return input_graph_def
input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
# Import the frozen graph into the freshly-reset default graph with no name
# prefix so checkpoint variable names line up with tensor names.
_ = importer.import_graph_def(input_graph_def, name="")

with session.Session() as sess:
    # Map each checkpoint variable name to the matching tensor in the
    # imported graph; names with no graph counterpart are skipped.
    var_list = {}
    reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
    var_to_shape_map = reader.get_variable_to_shape_map()
    for key in var_to_shape_map:
        try:
            tensor = sess.graph.get_tensor_by_name(key + ":0")
        except KeyError:
            # Checkpoint entry has no tensor in this graph — ignore it.
            continue
        var_list[key] = tensor
    saver = saver_lib.Saver(var_list=var_list, write_version=checkpoint_version)
    saver.restore(sess, input_checkpoint)
    # Pull the restored conv1 weights/bias out as numpy arrays; they seed
    # the new model's conv1 initializers below.
    W, B = sess.run(['conv1/kernel:0', 'conv1/bias:0'])

# ---- Rebuild the FCN. conv1 starts from the restored MNIST weights. ----
scale = 2  # channel-width multiplier for the decoder layers
kernel_init = tf.constant_initializer(W)
bias_init = tf.constant_initializer(B)
# NOTE(review): the frozen graph imported earlier already contains nodes
# named 'conv1/...', so TF may uniquify these op names — confirm the
# intended variables are the ones actually being trained.
l1_1 = tf.layers.conv2d(input_ph, 8, 3, 1, activation=tf.nn.relu, name='conv1', trainable=True,
                        kernel_initializer=kernel_init, bias_initializer=bias_init)
l1_2 = tf.layers.max_pooling2d(l1_1, 2, 2, name='pool1')
# Decoder: transposed convolutions upsample back toward the input size.
l2_0 = tf.layers.conv2d_transpose(l1_2, 32 * scale, 2, 1, activation=tf.nn.relu, name='convT0', \
                                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                  bias_initializer=tf.constant_initializer(0.1))

l2_1 = tf.layers.conv2d_transpose(l2_0, 16 * scale, 2, 2, activation=tf.nn.relu, name='convT1', \
                                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                  bias_initializer=tf.constant_initializer(0.1))
# Skip/bridge branch taken from the pre-pooling feature map.
l1_skip_conv = tf.layers.conv2d_transpose(l1_1, 16 * scale, 3, 1, activation=tf.nn.relu, name='bridge_conv', \
                                          kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                          bias_initializer=tf.constant_initializer(0.1))
# NOTE(review): the 0.0 factor disables the skip connection entirely (and
# blocks any gradient into bridge_conv) — presumably left in for
# experimentation; confirm whether it should be re-enabled.
l_concat = l2_1 + 0.0 * l1_skip_conv

l_concat_conv = tf.layers.conv2d(l_concat, 16 * scale, 3, 1, 'same', activation=tf.nn.relu, name='l_concat_conv', \
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                 bias_initializer=tf.constant_initializer(0.1))

# 1x1 convolution producing per-pixel class scores.
l2_2 = tf.layers.conv2d(l_concat_conv, num_classes, 1, 1, activation=tf.nn.relu, name='conv2d', \
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        bias_initializer=tf.constant_initializer(0.1))
print(l1_2.shape, l2_2.shape)
# predictions_op is a boolean mask of classes whose softmax probability
# exceeds 0.1 — NOT an argmax over classes.
predictions_op_pre = tf.identity(tf.nn.softmax(l2_2), name='predictions_softmax')
predictions_op = predictions_op_pre > 0.1
# Flatten the spatial dimensions so every pixel is one row for the
# cross-entropy loss.
logits = tf.reshape(l2_2, [-1, num_classes], name='logits')
labels_ph = tf.placeholder(dtype=tf.int32, shape=outShape, name='segmentation_labels')
labels_flat = tf.reshape(labels_ph, [-1, num_classes])
cost_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_flat, logits=logits))
optimizer_op = tf.train.AdamOptimizer(learning_rate=lr_ph).minimize(cost_op)

def get_next_batch(X, y, size=None):
    """Yield successive (X, y) mini-batch pairs.

    Args:
        X: indexable inputs, sliced in lockstep with y.
        y: indexable targets; its length drives the iteration.
        size: batch size; defaults to the module-level `batch_size`
            (new optional parameter — backward compatible).

    Yields:
        (X_batch, y_batch) tuples; the last batch may be shorter.
    """
    if size is None:
        size = batch_size
    # Slicing clamps at the end of the sequence, so no explicit min() is
    # needed for the final partial batch.
    for start in range(0, len(y), size):
        yield X[start:start + size], y[start:start + size]

def predict(X):
    """Evaluate the boolean prediction mask for a batch of 2-D images.

    Adds the trailing channel axis expected by input_ph, then evaluates
    predictions_op (softmax > 0.1) in the current default session.
    """
    with_channel = np.expand_dims(X, 3)
    return predictions_op.eval(feed_dict={input_ph: with_channel})

def conv1_1(X):
    """Evaluate the conv1 activation map for a batch of 2-D images.

    Adds the trailing channel axis expected by input_ph and runs l1_1 in
    the current default session.
    """
    with_channel = np.expand_dims(X, 3)
    return l1_1.eval(feed_dict={input_ph: with_channel})

def eval_Iou(X, y):
    """Mean intersection-over-union (in percent) over mini-batches.

    Args:
        X: batch of 2-D input images (channel axis added by predict()).
        y: matching one-hot-ish segmentation targets.

    Returns:
        Average per-batch IoU * 100; 0.0 when there are no batches.
    """
    nb_batches = 0
    total_iou = 0.0
    for batch_X, batch_y in get_next_batch(X, y):
        preds = predict(batch_X)
        # np.float was removed in NumPy >= 1.24; plain float is equivalent.
        inter_M = np.logical_and(batch_y, preds).astype(float)
        union_M = np.logical_or(batch_y, preds).astype(float)
        value_I = np.sum(np.mean(inter_M, axis=0))
        value_U = np.sum(np.mean(union_M, axis=0))
        # BUG FIX: original tested `value_U is not 0` — an identity check
        # against a numpy float that is always True, allowing a divide by
        # zero (nan) when the union is empty. Compare by value instead.
        if value_U != 0:
            total_iou += value_I / value_U
        nb_batches += 1
    # Guard the empty-input case instead of raising ZeroDivisionError.
    if nb_batches == 0:
        return 0.0
    return 100 * total_iou / nb_batches


def mean(a):
    """Arithmetic mean of a non-empty sequence, always as a float."""
    return float(sum(a)) / len(a)

# ---- Data loading, shuffling, normalization, and train/val split. ----
X = np.load('../combined.npy')
y = np.load('../segmented.npy')
models_dir = 'myself_fcn_layer5'
if not os.path.exists(models_dir):
    os.mkdir(models_dir)

# BUG FIX: the original split into train/val BEFORE shuffling/normalizing.
# sklearn's shuffle returns NEW arrays (rebinding X and y), so the
# train/val slices were views of the raw, unshuffled data and never saw
# the float cast or the /127 scaling. Shuffle and normalize first, then
# split.
X, y = shuffle(X, y)
X = X.astype(np.float32)
y = y.astype(np.float32)
# Normalize pixel values into roughly [-1, 1].
X -= 127.0
X /= 127.0

n_train = int(0.7 * len(y))
y_train, X_train = y[:n_train], X[:n_train]
y_val, X_val = y[n_train:], X[n_train:]

batch_size = 4
epoches = 300
learning_rate = 0.0001

# Per-epoch history of training/validation cost and IoU.
Ctrn = []
Cval = []
Tiou = []
Viou = []
a = X_val[0]  # fixed sample used for a qualitative prediction each epoch
for var in tf.trainable_variables():
    print(var)

# ---- Training loop: Adam on per-pixel softmax cross-entropy. ----
with tf.Session() as sess:
    tfvars = tf.global_variables()
    sess.run(tf.variables_initializer(tfvars))
    # sess.run(tf.global_variables_initializer())
    for epoch in range(epoches):
        # Per-epoch accumulators: training/validation cost and IoU.
        ctrn = [];
        cval = [];
        tiou = [];
        viou = [];
        for start in range(0, len(y_train), batch_size):
            end = min(len(y_train), start + batch_size)
            # NOTE(review): this rebinds the module-level `y` (the full
            # dataset) to the current batch; harmless here only because `y`
            # is not read again after the split.
            x, y = X_train[start:end], y_train[start:end]
            x_ = np.expand_dims(x, 3)
            _, cost_trn = sess.run([optimizer_op, cost_op],
                                   feed_dict={input_ph: x_, labels_ph: y, lr_ph: learning_rate})
            # NOTE(review): computing IoU for every training batch re-runs
            # the forward pass and roughly doubles epoch time.
            train_Iou = eval_Iou(x, y)
            ctrn += [cost_trn];
            tiou += [train_Iou]
        # IoU on a fixed 100-sample training subset, for progress logging.
        train_Iou0 = eval_Iou(X_train[:100], y_train[:100])

        print('train', epoch, train_Iou0)
        aaa = conv1_1(X_train[0:2])
        # print(aaa[0][0][0])

        # Validation pass: cost and IoU only, no optimizer step.
        for start in range(0, len(y_val), batch_size):
            end = min(len(y_val), start + batch_size)
            x, y = X_val[start:end], y_val[start:end]
            x_ = np.expand_dims(x, 3)
            cost_val = cost_op.eval(feed_dict={input_ph: x_, labels_ph: y})
            val_Iou = eval_Iou(x, y)
            cval += [cost_val];
            viou += [val_Iou]
        val_Iou0 = eval_Iou(X_val[:100], y_val[:100])
        print('val', epoch, val_Iou0)

        # Qualitative prediction on the fixed sample `a`.
        # NOTE(review): tf.expand_dims here adds a NEW op to the graph on
        # every epoch (unbounded graph growth), and `b` is never used
        # afterwards — looks like leftover debugging code; confirm.
        p = predict([a])
        b = np.argmax(p, axis=3)
        c = np.array(b, np.float16)
        b = tf.expand_dims(c, 3)

        Ctrn += [mean(ctrn)];
        Cval += [mean(cval)]
        Tiou += [mean(tiou)];
        Viou += [mean(viou)]
# NOTE(review): np.savez appends '.npz' when the name lacks it, so these
# land on disk as 'Loss.npy.npz' / 'Iou.npy.npz' — confirm downstream
# loaders expect those names.
np.savez('Loss.npy', Ctrn=Ctrn, Cval=Cval)
np.savez('Iou.npy', Tiou=Tiou, Viou=Viou)

import os
import matplotlib

# Agg backend: render to files only, no display required.
matplotlib.use('Agg')
from matplotlib import pyplot as plt

plt_folder = models_dir
xl = np.linspace(1, len(Tiou), len(Tiou))

# IoU curves (train vs validation) per epoch.
plt.figure()
plt.plot(xl, Tiou, xl, Viou)
plt.ylabel('Iou')
plt.xlabel("Epoch")
# BUG FIX: grid() was called AFTER savefig(), so the saved image never
# showed the grid; enable it before saving.
plt.grid()
plt.savefig(os.path.join(plt_folder, '{}.png'.format('Iou')))
# plt.show()
plt.close()

# Loss curves (train vs validation) per epoch.
plt.figure()
plt.plot(xl, Ctrn, xl, Cval)
plt.xlabel("Epoch")
plt.ylabel('Loss')
plt.grid()
plt.savefig(os.path.join(plt_folder, '{}.png'.format('Loss')))
# plt.show()
plt.close()



