import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from sklearn.metrics import accuracy_score
import os

# --- Data loading & preprocessing -------------------------------------------
# Expects an MNIST-style CSV: column 0 is the digit label, the remaining
# columns are pixel intensities (presumably 28x28 = 784 values in [0, 255]
# — confirmed by the reshape to (28, 28, 1) in nn()).
# NOTE: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
dataframe = pd.read_csv('../train.csv').to_numpy()
dataframe = dataframe[:3000, :]  # small subset to keep experiments fast
y, X = dataframe[:, 0], dataframe[:, 1:]
y = y.reshape([-1, 1])
# Shuffle features and labels jointly with a single permutation so rows stay aligned.
indices = np.random.permutation(len(X))
X = X[indices].astype(np.float32)
y = y[indices].astype(np.float32)
# Split it into train (70%) and held-out test (30%)
n_train = int(0.7 * len(y))
y_train, X_train, y_test, X_test = y[: n_train], X[: n_train, :], y[n_train:], X[n_train:, :]
# Scale pixel values from [0, 255] to roughly [-1, 1].
X_train -= 127.0
X_train /= 127.0
X_test -= 127.0
X_test /= 127.0

def get_next_batch(batch_size, X, y):
    """Yield successive (features, labels) mini-batches from X and y.

    Batches are taken in order; the final batch may be smaller than
    ``batch_size`` when ``len(y)`` is not an exact multiple of it.

    Args:
        batch_size: maximum number of rows per batch.
        X: 2-D feature array, indexed as X[rows, :].
        y: label array with the same number of rows as X.

    Yields:
        Tuples ``(X_batch, y_batch)`` covering all rows exactly once.
    """
    n = len(y)
    for start in range(0, n, batch_size):
        stop = min(n, start + batch_size)
        yield X[start:stop, :], y[start:stop]

def nn(alpha=1, beta=1):
    """Build the TF1 graph: one conv layer with a scaled residual skip,
    max-pooling, and two dense layers producing 10-class logits.

    Args:
        alpha: weight applied to the conv branch in the residual mix.
        beta: weight applied to the raw-input branch in the residual mix.

    Returns:
        Tuple of (input placeholder, label placeholder, learning-rate
        placeholder, cost op, train op, prediction logits op).
    """
    inputs = tf.placeholder(dtype=tf.float32, shape=(None, X_train.shape[1]), name='input_image')
    targets = tf.placeholder(dtype=tf.int32, shape=(None, 1), name='labels')
    lr = tf.placeholder(dtype=tf.float32, name='learning_rate')

    # Flat pixel rows -> NHWC images (assumes 28x28 grayscale input).
    images = tf.reshape(inputs, [-1, 28, 28, 1], name='reshaped_image')
    conv = tf.layers.conv2d(images, 8, 3, 1, activation=tf.nn.relu, name='conv1', padding='same')
    # Weighted residual combination of the conv output and the raw image.
    mixed = alpha * conv + beta * images
    pooled = tf.layers.max_pooling2d(mixed, 2, 2, name='pool1')
    flat = tf.layers.flatten(pooled, name='flatten')
    hidden = tf.layers.dense(flat, 32, activation=tf.nn.relu, name='dense32')
    logits = tf.layers.dense(hidden, 10, name='dense10')
    preds = tf.identity(logits, name='predictions')
    print(conv.shape, pooled.shape, flat.shape)
    # Cross-entropy against one-hot targets.
    # NOTE(review): targets has shape (None, 1), so tf.one_hot yields rank 3
    # (None, 1, 10) vs rank-2 logits — verify this broadcasts as intended.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.one_hot(targets, depth=10)),
                          name='cost')
    # Adam with a feedable learning rate.
    train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
    return inputs, targets, lr, cost, train_op, preds

def eval_cost(cost_op, X, y, batch_sz):
    """Return the cost op averaged over all mini-batches of (X, y).

    Relies on the module-level placeholders ``img_placeholder``/``labels``
    and an active TF default session.

    Args:
        cost_op: scalar tensor to evaluate per batch.
        X: feature matrix.
        y: label column matching X.
        batch_sz: mini-batch size used for evaluation.

    Returns:
        Mean of per-batch cost values (unweighted, so a smaller final
        batch counts the same as a full one).
    """
    total_cost = 0.0
    nb_batches = 0
    # BUG FIX: the original iterated with the global `batch_size` and
    # evaluated the global `cost`, silently ignoring both parameters.
    for batch_X, batch_y in get_next_batch(batch_sz, X, y):
        feed_dict = {img_placeholder: batch_X, labels: batch_y}
        total_cost += cost_op.eval(feed_dict=feed_dict)
        nb_batches += 1
    return total_cost / nb_batches

def eval_accuracy(output_op, X, y, batch_sz):
    """Return classification accuracy (percent) averaged over mini-batches.

    Relies on the module-level placeholders ``img_placeholder``/``labels``
    and an active TF default session.

    Args:
        output_op: logits tensor; argmax over axis 1 gives the predicted class.
        X: feature matrix.
        y: label column matching X.
        batch_sz: mini-batch size used for evaluation.

    Returns:
        100 * mean per-batch accuracy (unweighted, so a smaller final
        batch counts the same as a full one).
    """
    nb_batches = 0
    total_acc = 0.0
    for batch_X, batch_y in get_next_batch(batch_sz, X, y):
        feed_dict = {img_placeholder: batch_X, labels: batch_y}
        # BUG FIX: the original evaluated the global `output`, ignoring
        # the `output_op` parameter.
        y_predicted = np.argmax(output_op.eval(feed_dict=feed_dict), 1)
        # Flatten the (n, 1) label column to 1-D for accuracy_score.
        total_acc += accuracy_score(batch_y.ravel(), y_predicted)
        nb_batches += 1
    return 100 * total_acc / nb_batches

# --- Training configuration --------------------------------------------------
epochs = 50

batch_size = 512
learning_rate = 0.001
batches_per_epoch = int(X_train.shape[0] / batch_size)  # currently unused
# Fraction of the training set used for fitting; the rest is the per-epoch
# validation fold.
n_train = int(len(y_train) * 0.8)
models_dir = '../checkpoints_5'
plt_folder = models_dir
plt.figure()

# Sweep the residual-mix weight `beta` and plot accuracy per epoch for each.
Beta = [0,  0.5, 1 ]
for b in Beta:
    tf.reset_default_graph()
    img_placeholder, labels, learning_rate_ph, cost, optimizer, output = nn(beta=b)
    # Launch the graph
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        Ctrn = [];        Cval = [];        Tacc = [];        Vacc = [];
        for epoch in range(epochs):
            # Re-shuffle and re-split the training set into train/val folds
            # each epoch (Monte-Carlo cross validation).
            indices = np.arange(len(y_train))
            np.random.shuffle(indices)
            train_indices = indices[: n_train]
            val_indices = indices[n_train:]
            y_train_fold, X_train_fold, y_val_fold, X_val_fold = y_train[train_indices], X_train[train_indices, :], \
                                                                 y_train[val_indices], X_train[val_indices, :]
            # Loop over all training batches.
            # (Loop variables renamed: the original used `x, y`, shadowing
            # the module-level label array `y`.)
            for batch_x, batch_y in get_next_batch(batch_size, X_train_fold, y_train_fold):
                sess.run(optimizer,
                         feed_dict={img_placeholder: batch_x, labels: batch_y, learning_rate_ph: learning_rate})
            training_cost = eval_cost(cost, X_train_fold, y_train_fold, batch_size)
            # BUG FIX: the original computed "val" metrics on the held-out
            # test set while the validation fold built above went unused —
            # evaluate on the validation fold instead.
            val_cost = eval_cost(cost, X_val_fold, y_val_fold, batch_size)
            training_acc = eval_accuracy(output, X_train_fold, y_train_fold, batch_size)
            val_acc = eval_accuracy(output, X_val_fold, y_val_fold, batch_size)
            Ctrn += [training_cost];
            Cval += [val_cost]
            Tacc += [training_acc];
            Vacc += [val_acc]
            # BUG FIX: the original printed training_cost twice and never val_cost.
            print(training_cost, val_cost, training_acc, val_acc)
        xl = np.linspace(1, len(Tacc), len(Tacc))
        plt.plot(xl, Tacc,linestyle='-', label=str(b)+'_train')
        # plt.plot(xl, Vacc, linestyle='-.', label=str(b)+'_val')
plt.legend()
plt.ylabel('Acc')
plt.xlabel("Epoch")
plt.grid()
plt.savefig(os.path.join(plt_folder, '{}.png'.format('Iou')))
plt.show()
# plt.close()


