# coding: utf-8

from Prework import *

# Resolve dataset locations and load the raw training data.
IMAGE_DIRECTORY, TRAINING_IMAGES_DIR, VAL_IMAGES_DIR = get_directories()
print(IMAGE_DIRECTORY, TRAINING_IMAGES_DIR, VAL_IMAGES_DIR)

training_images, training_labels, training_files = load_training_images(TRAINING_IMAGES_DIR)

# Shuffle images, labels and filenames with one shared permutation so the
# three arrays stay aligned row-for-row.
shuffle_index = np.random.permutation(len(training_labels))
training_images, training_labels, training_files = (
    training_images[shuffle_index],
    training_labels[shuffle_index],
    training_files[shuffle_index],
)

# Fit an integer encoding on the training class labels; the same fitted
# encoder is reused below so validation labels map to identical ids.
le = preprocessing.LabelEncoder()
training_le = le.fit(training_labels)
training_labels_encoded = training_le.transform(training_labels)
print("First 30 Training Labels", training_labels_encoded[0:30])

# Validation annotations: tab-separated rows of file name, class id and
# bounding-box columns.
val_data = pd.read_csv(VAL_IMAGES_DIR + 'val_annotations.txt', sep='\t', header=None,
                       names=['File', 'Class', 'X', 'Y', 'H', 'W'])

val_images, val_labels, val_files = load_validation_images(VAL_IMAGES_DIR, val_data)
val_labels_encoded = training_le.transform(val_labels)
print(val_labels_encoded[0:30])

reset_graph()

# Graph inputs: X receives flat pixel vectors and is reshaped to NHWC for the
# convolutional stack; y holds the integer-encoded class labels.
y = tf.placeholder(tf.int32, shape=[None], name="y")
X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])


def get_next_batch(batchsize=10, images=None, labels=None):
    """Yield successive [images, labels] mini-batches over a dataset.

    Args:
        batchsize: number of samples per batch; the final batch may be
            smaller when the dataset size is not a multiple of ``batchsize``.
        images: indexable image collection; defaults to the module-level
            ``training_images`` (backward compatible with the original).
        labels: indexable label collection aligned with ``images``; defaults
            to the module-level ``training_labels_encoded``.

    Yields:
        list ``[image_slice, label_slice]`` for each consecutive batch.
    """
    if images is None:
        images = training_images
    if labels is None:
        labels = training_labels_encoded
    for cursor in range(0, len(images), batchsize):
        yield [images[cursor:cursor + batchsize],
               labels[cursor:cursor + batchsize]]


def get_next_batch_val(batchsize=1, images=None, labels=None):
    """Yield successive [images, labels] mini-batches over the validation set.

    Args:
        batchsize: number of samples per batch; the final batch may be
            smaller when the dataset size is not a multiple of ``batchsize``.
        images: indexable image collection; defaults to the module-level
            ``val_images`` (backward compatible with the original).
        labels: indexable label collection aligned with ``images``; defaults
            to the module-level ``val_labels_encoded``.

    Yields:
        list ``[image_slice, label_slice]`` for each consecutive batch.
    """
    if images is None:
        images = val_images
    if labels is None:
        labels = val_labels_encoded
    for cursor in range(0, len(images), batchsize):
        yield [images[cursor:cursor + batchsize],
               labels[cursor:cursor + batchsize]]


# Convolutional network: two conv/pool stages, one dense layer, softmax head.
# Input shape: [-1, 64, 64, 3] (NHWC).
conv1 = tf.layers.conv2d(
    inputs=X_reshaped,
    filters=32,
    kernel_size=[3, 3],
    padding='SAME',
    activation=tf.nn.relu,
    name="conv1")

# conv1 output: [-1, 64, 64, 32]; pool1 halves spatial dims -> [-1, 32, 32, 32].
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[3, 3],
    padding='SAME',
    activation=tf.nn.relu,
    name="conv2")

# conv2 output: [-1, 32, 32, 64]; pool2 -> [-1, 16, 16, 64].
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

# Dense layer.
# BUG FIX: with two SAME-padded convs and two 2x2 pools on a 64x64 input,
# pool2 is 16x16x64 (= 16384 features), not 8x8x64. The old reshape to
# [-1, 8*8*64] silently folded 4 samples into each row, corrupting the
# batch axis and mismatching the label batch size.
pool2_flat = tf.reshape(pool2, [-1, 16 * 16 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# NOTE(review): tf.layers.dropout defaults to training=False, so this layer
# is currently a no-op in both training and evaluation. Feed a boolean
# `training` placeholder here if dropout is actually intended.
dropout = tf.layers.dropout(inputs=dense, rate=0.3)

# Logits layer: 200 Tiny-ImageNet classes.
# BUG FIX: the old code reshaped the [-1, 1024] dropout output to
# [-1, 8*8*64], again quartering the batch dimension; the dense/dropout
# output feeds the logits layer directly.
logits = tf.layers.dense(inputs=dropout, units=200, name='output')
Y_proba = tf.nn.softmax(logits, name="Y_proba")

# Loss, optimizer, and evaluation ops.
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)

# Top-1 accuracy over the batch.
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 200

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        acc_train = 0.0
        acc_val = 0.0
        n_train_batches = 0
        n_val_batches = 0

        # One pass over the training set: optimize, then measure batch accuracy.
        for batch in get_next_batch():
            X_batch, y_batch = batch[0], batch[1]
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            acc_train += accuracy.eval(feed_dict={X: X_batch, y: y_batch})
            n_train_batches += 1

        # Evaluation-only pass over the validation set.
        for batch_val in get_next_batch_val():
            X_batch_val, y_batch_val = batch_val[0], batch_val[1]
            acc_val += accuracy.eval(feed_dict={X: X_batch_val, y: y_batch_val})
            n_val_batches += 1

        # NOTE(review): rebinding the Python variable `learning_rate` has NO
        # effect on the optimizer -- AdamOptimizer captured the value when the
        # graph was built. A tf.placeholder fed per-step (or
        # tf.train.exponential_decay) is needed for a real schedule.
        if epoch % 10 == 0:
            learning_rate = learning_rate / 2

        # BUG FIX: the original printed the raw SUM of per-batch accuracies,
        # which grows with the number of batches; report the mean instead.
        # max(..., 1) guards against an empty dataset.
        print(epoch, "Train accuracy:", acc_train / max(n_train_batches, 1),
              "Val accuracy:", acc_val / max(n_val_batches, 1))

        # Checkpoint once per epoch so training can resume after a crash.
        save_path = saver.save(sess, "/output_dir/tiny_imagenet")