import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import accuracy_score
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# --------------------------------------------------------------------------
# Data loading, shuffling, train/test split and pixel normalization.
# --------------------------------------------------------------------------
# Number of image channels; each sample is a flat vector of 36300 values per
# channel, reshaped later in the graph to a 110x330x`in_channels` image.
in_channels = 3
# NOTE(review): assumes the HDF5 file holds a DataFrame with a 'label' column
# plus at least 36300*in_channels feature columns -- confirm against whatever
# script wrote ./data/im1x3_tvd.h5.
dataframe = pd.read_hdf('./data/im1x3_tvd.h5')
y0, X0 = dataframe['label'], dataframe.iloc[:, :36300*in_channels]

# Shuffle the sample order once before splitting off the test set.
indices = list(range(len(X0)))
np.random.shuffle(indices)

# NOTE(review): X uses positional .iloc while y uses y0[indices] (label-based
# for a non-default index); the two lookups only agree if the DataFrame has a
# plain RangeIndex -- verify the HDF file stores a default integer index.
X = X0.iloc[indices, :].astype(np.float32).reset_index().drop('index', axis=1).values
y = y0[indices].astype(np.float32).reset_index().drop('index', axis=1)['label'].values
y = y.reshape([-1, 1])  # column vector, matches the (batch_size, 1) labels placeholder

# Hold out the last 1/nfold of the shuffled data as the test set.
nfold = 14
n_train = int((nfold-1)/nfold * len(y))
y_train, X_train, y_test, X_test = y[: n_train], X[: n_train, :], \
                                   y[n_train:], X[n_train:, :]
# Scale raw pixel values from [0, 255] to roughly [-1, 1].
# (These slices are numpy views, so the in-place ops also modify X; X is not
# used again below, so that is harmless.)
X_train -= 127.0
X_train /= 127.0
X_test -= 127.0
X_test /= 127.0


# Training hyper-parameters.
epochs = 50
batch_size = 20
learning_rate = 0.001
batches_per_epoch = int(X_train.shape[0] / batch_size)  # NOTE(review): computed but never used below
n_Class = 3


# --------------------------------------------------------------------------
# TF1 graph: 3x(conv+maxpool) -> dense(32) -> dense(n_Class) logits,
# softmax cross-entropy loss, Adam optimizer.
# --------------------------------------------------------------------------
# Flat image batch; reshaped to NHWC below.
img_placeholder = tf.placeholder(dtype=tf.float32, shape=(batch_size, X_train.shape[1]), name='input_image')
# Integer class labels, one per sample, shape (batch, 1).
labels = tf.placeholder(dtype=tf.int32, shape=(batch_size, 1), name='labels')
# Fed per-step so the rate could be scheduled without rebuilding the graph.
learning_rate_ph = tf.placeholder(dtype=tf.float32, name='learning_rate')
# 110*330*in_channels == 36300*in_channels, matching the feature slice above.
input_2d = tf.reshape(img_placeholder, [-1, 110, 330, in_channels], name='reshaped_image')
l1_1 = tf.layers.conv2d(input_2d, 20, 3, 1, activation=tf.nn.relu, name='conv1')
l1_2 = tf.layers.max_pooling2d(l1_1, 2, 2, name='pool1')
l1_3 = tf.layers.conv2d(l1_2, 20, 3, 1, activation=tf.nn.relu, name='conv2')
l1_4 = tf.layers.max_pooling2d(l1_3, 2, 2, name='pool2')
l1_41 = tf.layers.conv2d(l1_4, 20, 3, 1, activation=tf.nn.relu, name='conv3')
l1_42 = tf.layers.max_pooling2d(l1_41, 2, 2, name='pool3')
# Flatten using the static batch dimension (requires a full batch at feed time).
l1_5 = tf.reshape(l1_42, [l1_42.shape[0].value, -1], name='flatten')
l2 = tf.layers.dense(l1_5, 32, activation=tf.nn.relu, name='dense32')
out = tf.layers.dense(l2, n_Class, name='dense10')
# Debug prints of logits vs one-hot label shapes (left in by the author).
print(out.shape)
print(tf.one_hot(labels, depth=n_Class).shape)
predictions = tf.identity(out, name='predictions')
# NOTE(review): one_hot of a (batch, 1) tensor yields (batch, 1, n_Class)
# while logits are (batch, n_Class); the shape prints above suggest this was
# inspected -- confirm the loss broadcasts/reduces as intended (a squeeze of
# the labels axis would make the shapes match exactly).
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=tf.one_hot(labels, depth=n_Class)),
                      name='cost')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_ph).minimize(cost)
# Alias used by the evaluation helpers below.
output = out


def get_next_batch(batch_size, X, y):
    """Yield successive (X, y) mini-batches in order.

    The final batch is shorter when len(y) is not a multiple of batch_size.
    """
    n = len(y)
    start = 0
    while start < n:
        stop = min(n, start + batch_size)
        yield X[start:stop, :], y[start:stop]
        start = stop


def eval_cost(X, y):
    """Return the graph `cost` averaged over all mini-batches of (X, y).

    Must be called inside an active default session; uses the module-level
    `cost`, `img_placeholder`, `labels` and `batch_size`.
    """
    batch_costs = []
    for batch_X, batch_y in get_next_batch(batch_size, X, y):
        feed = {img_placeholder: batch_X, labels: batch_y}
        batch_costs.append(cost.eval(feed_dict=feed))
    return sum(batch_costs) / len(batch_costs)


def eval_accuracy(X, y, batch_sz):
    """Return the mean per-batch accuracy (in percent) over (X, y).

    Predictions are the argmax of the module-level `output` logits; must be
    called inside an active default session.
    """
    per_batch = []
    for batch_X, batch_y in get_next_batch(batch_sz, X, y):
        feed = {img_placeholder: batch_X, labels: batch_y}
        predicted = np.argmax(output.eval(feed_dict=feed), 1)
        per_batch.append(accuracy_score(batch_y, predicted))
    return 100 * sum(per_batch) / len(per_batch)


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Create the Saver ONCE, before the loop: the original built a new
    # tf.train.Saver() every epoch, which keeps adding save/restore ops to
    # the (still-mutable) default graph on each iteration.
    saver = tf.train.Saver()
    model_path = './checkpoints_3channle-bili_1_14/mnist.ckpt'

    for epoch in range(epochs):
        # Re-split the training data into train/validation folds each epoch
        # (random hold-out of 1/(nfold-1); not a true rotating k-fold).
        indices = np.arange(len(y_train))
        np.random.shuffle(indices)
        n_train1 = int((nfold - 2) / (nfold - 1) * len(y_train))
        train_indices = indices[:n_train1]
        val_indices = indices[n_train1:]

        y_train_fold, X_train_fold = y_train[train_indices], X_train[train_indices, :]
        y_val_fold, X_val_fold = y_train[val_indices], X_train[val_indices, :]

        # One optimization pass over the training fold.
        for bx, by in get_next_batch(batch_size, X_train_fold, y_train_fold):
            sess.run([optimizer, cost],
                     feed_dict={img_placeholder: bx, labels: by,
                                learning_rate_ph: learning_rate})

        # Evaluate after every epoch.  (The original guard `if (epoch + 1):`
        # was always truthy, so this was unconditional already.)
        training_cost = eval_cost(X_train_fold, y_train_fold)
        training_acc = eval_accuracy(X_train_fold, y_train_fold, batch_size)
        valid_acc = eval_accuracy(X_val_fold, y_val_fold, batch_size)
        test_acc = eval_accuracy(X_test, y_test, batch_size)
        print(epoch + 1, training_cost, training_acc, valid_acc, test_acc)

        # Checkpoint once per epoch, tagged with the epoch number.
        saver.save(sess, model_path, epoch + 1)

    # Announce completion once, after all epochs (the original printed this
    # inside the loop, every epoch).
    print("Optimization Finished!")

