import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import accuracy_score
import os
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.training import saver as saver_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import importer
from tensorflow.python.platform import gfile
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session

def _parse_input_graph_proto(input_graph, input_binary):
  """Load a serialized GraphDef from disk.

  Args:
    input_graph: Path to the frozen-graph protobuf file.
    input_binary: Must be True (binary protobuf). Text-format graphs are
      not supported by this helper.

  Returns:
    A populated graph_pb2.GraphDef, or -1 if the file does not exist
    (the -1 sentinel is kept for backward compatibility with callers).

  Raises:
    ValueError: if input_binary is False. The original code silently
      returned an EMPTY GraphDef for text-format input because the
      non-binary branch never parsed the file; failing loudly is safer.
  """
  if not gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1
  if not input_binary:
    # Parsing text-format protos would need google.protobuf.text_format,
    # which this module does not import; refuse instead of returning an
    # empty (and therefore useless) GraphDef.
    raise ValueError("text-format graphs are not supported; "
                     "use a binary .pb and input_binary=True")
  input_graph_def = graph_pb2.GraphDef()
  with gfile.FastGFile(input_graph, "rb") as f:
    input_graph_def.ParseFromString(f.read())
  return input_graph_def

def get_next_batch(batch_size, X, y):
    """Yield successive (X, y) mini-batches of at most `batch_size` rows.

    X is expected to be 2-D (one example per row); y is indexed with the
    same row slice. The last batch may be smaller when len(y) is not an
    exact multiple of batch_size.
    """
    n = len(y)
    lo = 0
    while lo < n:
        hi = min(n, lo + batch_size)
        yield X[lo:hi, :], y[lo:hi]
        lo = hi

def nn(alpha=1, beta=1):
    """Build the MNIST conv net, warm-started from a saved checkpoint.

    Weight values are read out of ../checkpoints_5 (via the frozen graph
    plus checkpoint), then baked into tf.constant_initializer objects, so
    that every later `tf.global_variables_initializer()` call resets the
    new graph to this same starting point.

    NOTE(review): `alpha` and `beta` are never used in this body —
    possibly leftovers from a regularized variant; confirm before removal.

    Returns:
        Tuple of (img_placeholder, labels, learning_rate_ph, cost,
        optimizer, predictions_op, grad) used by the training script.
    """
    checkpoint_version = saver_pb2.SaverDef.V2
    input_checkpoint = '../checkpoints_5/mnist.ckpt-0'
    input_graph = '../checkpoints_5/mnist.pb'
    input_binary = True
    input_graph_def = _parse_input_graph_proto(input_graph, input_binary)
    # Import the frozen graph so checkpoint variables can be found by name.
    _ = importer.import_graph_def(input_graph_def, name="")
    with session.Session() as sess:
        var_list = {}
        reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
        var_to_shape_map = reader.get_variable_to_shape_map()
        for key in var_to_shape_map:
            try:
                # Map each checkpoint entry onto the imported graph tensor.
                tensor = sess.graph.get_tensor_by_name(key + ":0")
            except KeyError:
                # Checkpoint entries with no matching tensor are skipped.
                continue
            var_list[key] = tensor
            print(tensor)
        saver = saver_lib.Saver(var_list=var_list, write_version=checkpoint_version)
        saver.restore(sess, input_checkpoint)
        # Pull the concrete weight arrays out of the restored session ...
        W1,B1,W2,B2,W3,B3=sess.run(['conv1/kernel:0', 'conv1/bias:0', \
                                    'dense32/kernel:0', 'dense32/bias:0',\
                                    'dense10/kernel:0', 'dense10/bias:0'])
        # ... and freeze them into initializers for the layers built below.
        W1i=tf.constant_initializer(W1);    W2i=tf.constant_initializer(W2)
        W3i=tf.constant_initializer(W3);    B1i=tf.constant_initializer(B1)
        B2i=tf.constant_initializer(B2);    B3i=tf.constant_initializer(B3)

    # Fresh placeholders for flattened 28x28 images and integer labels.
    img_placeholder = tf.placeholder(dtype=tf.float32, shape=(None, 784), name='input_image')
    labels = tf.placeholder(dtype=tf.int32, shape=(None, 1), name='labels')
    learning_rate_ph = tf.placeholder(dtype=tf.float32, name='learning_rate')

    # 28x28x1 -> conv(8 filters, 3x3) -> maxpool(2) -> flatten -> dense(32) -> dense(10)
    input_2d = tf.reshape(img_placeholder, [-1, 28, 28, 1], name='reshaped_image')
    l1_1 = tf.layers.conv2d(input_2d, 8, 3, 1, activation=tf.nn.relu, name='conv1', padding='same', kernel_initializer=W1i, bias_initializer=B1i)
    l1_2 = tf.layers.max_pooling2d(l1_1, 2, 2, name='pool1')
    l1_5 = tf.layers.flatten(l1_2, name='flatten')
    l2 = tf.layers.dense(l1_5, 32, activation=tf.nn.relu, name='dense32', kernel_initializer=W2i, bias_initializer=B2i)
    out = tf.layers.dense(l2, 10, name='dense10', kernel_initializer=W3i, bias_initializer=B3i)
    predictions_op = tf.identity(tf.nn.softmax(out), name='predictions_softmax')
    print(l1_1.shape, l1_2.shape, l1_5.shape)
    # Define the cost
    # NOTE(review): `labels` has shape (None, 1), so tf.one_hot produces
    # (None, 1, 10) against logits of shape (None, 10) — verify the
    # cross-entropy really reduces per-example as intended.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=tf.one_hot(labels, depth=10)),
                          name='cost')
    # Apply an optimizer
    adamer = tf.train.AdamOptimizer(learning_rate=learning_rate_ph)
    optimizer = adamer.minimize(cost)
    # Separate gradients op used only for TensorBoard histograms.
    grad = adamer.compute_gradients(cost)
    return img_placeholder, labels, learning_rate_ph, cost, optimizer, predictions_op, grad

def eval_cost(cost_op, X, y, batch_size):
    """Return the mean per-batch value of `cost_op` over (X, y).

    Relies on the module-level `img_placeholder` / `labels` tensors and
    on a default TF session being active. Each batch is weighted
    equally, including a smaller final one.
    """
    running = 0
    batches_seen = 0
    for bx, by in get_next_batch(batch_size, X, y):
        running += cost_op.eval(feed_dict={img_placeholder: bx, labels: by})
        batches_seen += 1
    return running / batches_seen

def eval_accuracy(predictions_op, X, y, batch_sz):
    """Return the mean per-batch accuracy of `predictions_op`, in percent.

    Relies on the module-level `img_placeholder` / `labels` tensors and a
    default TF session. Predicted class = argmax over the softmax row.
    """
    acc_sum = 0
    batches_seen = 0
    for bx, by in get_next_batch(batch_sz, X, y):
        probs = predictions_op.eval(feed_dict={img_placeholder: bx, labels: by})
        # print(probs[0])
        predicted = np.argmax(probs, 1)
        acc_sum += accuracy_score(by, predicted)
        batches_seen += 1
    return 100 * acc_sum / batches_seen

# ---- Hyper-parameters, paths, data loading --------------------------------
epochs = 20
batch_size = 10
learning_rate = 0.001
models_dir = '../checkpoints_5'

# CSV layout: column 0 is the digit label, columns 1..784 are pixel values.
# `.values` replaces DataFrame.as_matrix(), which was deprecated and then
# removed in pandas 1.0 — the old call crashes on any modern pandas.
dataframe = pd.read_csv('../train.csv').values
y, X = dataframe[:, 0], dataframe[:, 1:]
y = y.reshape([-1, 1])
# Everything from row 4500 on is held out as the validation split.
y_test, X_test = y[4500:], X[4500:, :]
num = 500
# Fixed pool of `num` candidate training examples starting at row 2000.
yS, XS = y[2000:2000 + num], X[2000:2000 + num, :]

# Build the graph once; these placeholders/ops are reused for every run.
img_placeholder, labels, learning_rate_ph, cost, optimizer, prediction_op, grad_op = nn(beta=0)

Acc = []          # mean late-epoch validation accuracy, one entry per ordering
plt_folder = models_dir
plt.figure()

tb_models_dir = './tensorboard_log'

# Train from scratch on three precomputed example orderings and compare the
# resulting validation-accuracy curves.
for t in [9,8,7]:
    # Each .npz holds an index permutation into the candidate pool (yS/XS).
    file = './random_order/entropy_select_ind{}.npz'.format(t)
    A=np.load(file)
    indices=A['arr_0']
    # NOTE(review): this rebinds the module-level y/X loaded from the CSV.
    # Later loop iterations still index yS/XS, so behavior is unaffected,
    # but the full dataset is no longer reachable under these names.
    y, X = yS[indices], XS[indices]
    print(num)
    start = 0;end = start + num
    y_train, X_train = y[start:end], X[start:end, :]
    with tf.Session() as sess:

        summary_writer = tf.summary.FileWriter(tb_models_dir, sess.graph)

        # NOTE(review): these summary ops are re-registered on every pass of
        # the outer loop against the same (single) graph, so merge_all()
        # likely accumulates duplicate histograms across t values — confirm.
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
        for grad, var in grad_op:
            if grad is not None:
                tf.summary.histogram(var.op.name + '_grad', grad)
        merged = tf.summary.merge_all()

        # Re-initialize weights; the constant initializers set inside nn()
        # restore the checkpoint values, giving each run the same start.
        sess.run(tf.global_variables_initializer())
        Ctrn = [];        Cval = [];        Tacc = [];        Vacc = [];
        # Pre-training validation point (epoch-0 baseline).
        val_cost = eval_cost(cost, X_test, y_test, batch_size)
        val_acc = eval_accuracy(prediction_op, X_test, y_test, batch_size)
        Cval += [val_cost];        Vacc += [val_acc]
        for epoch in range(epochs):
            y_train_fold, X_train_fold = y_train, X_train
            for tx, ty in get_next_batch(batch_size, X_train_fold, y_train_fold):
                _, summ=sess.run([optimizer, merged], feed_dict={img_placeholder: tx, labels: ty, learning_rate_ph: learning_rate})
            # Per-epoch metrics on both splits; eval_* use the default
            # session established by this `with` block.
            training_cost = eval_cost(cost, X_train_fold, y_train_fold, batch_size)
            val_cost = eval_cost(cost, X_test, y_test, batch_size)
            training_acc = eval_accuracy(prediction_op, X_train_fold, y_train_fold, batch_size)
            val_acc = eval_accuracy(prediction_op, X_test, y_test, batch_size)
            Ctrn += [training_cost];            Cval += [val_cost]
            Tacc += [training_acc];            Vacc += [val_acc]
            print(training_cost, val_cost, training_acc, val_acc)

            # NOTE(review): `summ` holds only the LAST mini-batch's summary
            # of the epoch, and would be unbound if the training set were
            # smaller than one batch.
            summary_writer.add_summary(summ, epoch)
            summary_writer.flush()

        # np.savez('../random_order/entropy_select_ind'+str(t), indices)
        # np.savez('../random_order/entropy_select_Vacc'+str(t), Vacc)
        # Curve includes the epoch-0 baseline, hence len(Vacc) == epochs+1.
        xl = np.linspace(1, len(Vacc), len(Vacc))
        plt.plot(xl, Vacc, label='order_'+str(t) )
        # Final score for this ordering: mean of the last 5 validation accs.
        Acc+=[np.mean(Vacc[-5:])]
# plt.show()
# plt.close()
# plt.figure()
print(Acc)
# plt.plot(Num, Acc, marker='o', label=str(num)+'_val')
plt.legend()
plt.ylabel('Acc')
plt.xlabel("Epoch")
plt.grid()
# NOTE(review): the 'Iou' filename looks unrelated to accuracy curves —
# possibly copied from another script; confirm the intended name.
plt.savefig(os.path.join(plt_folder, '{}.png'.format('Iou')))
plt.show()
plt.close()
