from __future__ import division, print_function, absolute_import

import tensorflow.compat.v1 as tf
from sklearn.model_selection import train_test_split

from cifar10 import get_data
from cnn_raw import *

if __name__ == '__main__':
    # tf.compat.v1 placeholders/sessions only work in graph mode; under TF2
    # this call is required or tf.placeholder raises RuntimeError.
    tf.disable_v2_behavior()

    runtime_dir = './cnn_raw/'  # checkpoints + TensorBoard event files go here

    # Load CIFAR-10 and carve out a small held-out validation split (2%).
    train_images, train_labels, test_images, test_labels = get_data()
    X_train, X_val, Y_train, Y_val = train_test_split(train_images, train_labels, test_size=0.02)
    print(X_train.shape, Y_train.shape, X_val.shape, Y_val.shape)

    # Network Parameters
    width, height, channel = 32, 32, 3  # data input (img shape: 32*32*3)
    num_classes = 10  # total CIFAR-10 classes
    dropout = 0.75  # Dropout, probability to KEEP units during training

    # tf Graph input
    X = tf.placeholder(tf.float32, [None, width, height, channel], name='X')
    Y = tf.placeholder(tf.float32, [None, num_classes], name='Y')

    # Training Parameters
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # dropout (keep probability)
    params = {
        'learning_rate': 0.001,
        'keep_prob': keep_prob,
    }

    # cnn_model_fn is defined in cnn_raw (star-imported above).
    loss_op, train_op, accuracy = cnn_model_fn(X, Y, params)

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    # Scalar summaries for the loss/accuracy curves, plus a histogram per
    # trainable variable for weight-distribution monitoring in TensorBoard.
    tf.summary.scalar("loss", loss_op)
    tf.summary.scalar("accuracy", accuracy)
    for var in tf.trainable_variables():
        tf.summary.histogram(var.name, var)
    merged_summary_op = tf.summary.merge_all()

    # max_to_keep=0 keeps ALL checkpoints instead of only the 5 most recent.
    saver = tf.train.Saver(max_to_keep=0)

    with tf.Session() as sess:
        sess.run(init)

        summary_writer = tf.summary.FileWriter(runtime_dir, graph=tf.get_default_graph())

        num_epochs = 50
        batch_size = 100
        display_step = 50  # report/validate every 50 optimizer steps
        step = 0
        for epoch in range(num_epochs):
            m = X_train.shape[0]
            num_batch = m // batch_size  # any remainder < batch_size is dropped
            for i in range(num_batch):
                batch_start = i * batch_size
                batch_end = batch_start + batch_size
                batch_X = X_train[batch_start:batch_end]
                batch_Y = Y_train[batch_start:batch_end]
                _, summary = sess.run([train_op, merged_summary_op],
                                      feed_dict={X: batch_X,
                                                 Y: batch_Y,
                                                 keep_prob: dropout})
                step = step + 1
                if step % display_step == 0 or step == 1:
                    summary_writer.add_summary(summary, epoch * num_batch + i)
                    # Evaluate on the validation split with dropout disabled
                    # (keep_prob=1.0); label the numbers accordingly.
                    loss, acc = sess.run([loss_op, accuracy],
                                         feed_dict={X: X_val, Y: Y_val, keep_prob: 1.0})
                    print("Step " + str(step),
                          "Validation Loss= " + "{:.4f}".format(loss),
                          "Validation Accuracy= " + "{:.3f}".format(acc))

            # Checkpoint every 10 epochs; global_step suffixes the filename.
            if (epoch + 1) % 10 == 0:
                path_prefix = saver.save(sess, runtime_dir + 'cifar10', global_step=epoch + 1)
                print('path prefix is :', path_prefix)
        print("Optimization Finished!")
        saver.save(sess=sess, save_path=runtime_dir + 'cifar10')

        # Flush pending events so the tail of the run is not lost.
        summary_writer.close()

        # Test model: average per-batch accuracy over the full test set.
        # NOTE(review): assumes every test batch is the same size, so the
        # mean of batch accuracies equals the overall accuracy; any
        # remainder < batch_size is skipped.
        m_test = test_images.shape[0]
        num_batch = m_test // batch_size
        accuracy_sum = 0.0
        for i in range(num_batch):
            batch_start = i * batch_size
            batch_end = batch_start + batch_size
            batch_X = test_images[batch_start:batch_end]
            batch_Y = test_labels[batch_start:batch_end]
            accuracy_sum += sess.run(accuracy, feed_dict={X: batch_X, Y: batch_Y, keep_prob: 1.0})
        print("Testing Accuracy:", accuracy_sum / num_batch)

        # Point --logdir at the directory the FileWriter actually wrote to.
        print("Run the command line: --> tensorboard --logdir=" + runtime_dir + "\n" +
              "Then open http://localhost:6006/ in your web browser")
