import datetime
import os

import tensorflow as tf

from model.tf.densenet import densenet121
from model.tf.plainnet import PlainNet
from notebook.tiny_image_net import load_data

# Let tf.data choose the parallelism level for map() calls at runtime.
AUTOTUNE = tf.data.experimental.AUTOTUNE


def _set_up():
    """Cap TensorFlow's memory use on the first GPU, if any GPU is present.

    Creates a single logical device limited to 2048 MB (2 GB) on the first
    physical GPU. Must run before TensorFlow initializes the GPUs.
    """
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if not gpus:
        return
    try:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)],
        )
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Raised when virtual devices are configured after GPU initialization.
        print(e)


@tf.function
def _train_step(x, y, model, optimizer, loss_object, train_loss, train_accuracy):
    """Run one gradient-descent step on a single batch and update train metrics.

    Records the forward pass under a GradientTape, applies the resulting
    gradients via `optimizer`, and feeds loss/accuracy into the metric objects.
    """
    with tf.GradientTape() as tape:
        predictions = model(x, training=True)
        batch_loss = loss_object(y, predictions)
    grads = tape.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(batch_loss)
    train_accuracy(y, predictions)


@tf.function
def _test_step(x, y, model, loss_object, test_loss, test_accuracy):
    """Evaluate the model on one batch and update the test metrics.

    Fix: pass `training=False` explicitly so layers with train/inference
    behavior (BatchNorm, Dropout) run in inference mode, mirroring the
    explicit `training=True` used in `_train_step`.
    """
    y_pred = model(x, training=False)
    t_loss = loss_object(y, y_pred)
    test_loss(t_loss)
    test_accuracy(y, y_pred)


def run(train_dataset, test_dataset, model, optimizer, epochs=100):
    """Train `model` on `train_dataset`, evaluating on `test_dataset` every 2nd epoch.

    Writes per-epoch TensorBoard scalar summaries (loss/accuracy) under
    ``logs/gradient_tape/<timestamp>-<model.name>/{train,test}`` and saves the
    final model as a SavedModel under ``saved/<timestamp>``.

    Args:
        train_dataset: tf.data.Dataset yielding (x, y) training batches.
        test_dataset: tf.data.Dataset yielding (x, y) evaluation batches.
        model: Keras model with integer labels (sparse categorical loss).
        optimizer: tf.keras optimizer instance.
        epochs: number of full passes over `train_dataset`.
    """
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    # Metrics accumulate over all batches of an epoch; reset at epoch end.
    train_loss = tf.keras.metrics.Mean(name='train_loss', dtype=tf.float32)
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    test_loss = tf.keras.metrics.Mean(name='test_loss', dtype=tf.float32)
    test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')

    # Separate writers so TensorBoard shows train and test as distinct runs.
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = 'logs/gradient_tape/' + current_time + '-' + str(model.name) + '/train'
    test_log_dir = 'logs/gradient_tape/' + current_time + '-' + str(model.name) + '/test'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    test_summary_writer = tf.summary.create_file_writer(test_log_dir)

    for epoch in range(epochs):
        for x_train, y_train in train_dataset:
            _train_step(x_train, y_train, model, optimizer, loss_object, train_loss, train_accuracy)
        with train_summary_writer.as_default():
            tf.summary.scalar('loss', train_loss.result(), step=epoch)
            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)

        # Evaluate every second epoch (1-based epochs 2, 4, ...). The original
        # `epoch > 0 and` guard was redundant: epoch 0 already fails (0+1)%2==0.
        if (epoch + 1) % 2 == 0:
            for x_test, y_test in test_dataset:
                _test_step(x_test, y_test, model, loss_object, test_loss, test_accuracy)
            with test_summary_writer.as_default():
                tf.summary.scalar('loss', test_loss.result(), step=epoch)
                tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
            template = '=> Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
            print(template.format(epoch + 1, train_loss.result(), train_accuracy.result() * 100,
                                  test_loss.result(), test_accuracy.result() * 100))
        else:
            template = '=> Epoch {}, Loss: {}, Accuracy: {}'
            print(template.format(epoch + 1, train_loss.result(), train_accuracy.result() * 100))
        # Reset the metrics so the next epoch reports fresh averages.
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()

    # Save the trained model. Fix: os.path.join instead of the hard-coded
    # Windows separator "saved\\", which produced a literal 'saved\...' file
    # name (not a directory) on POSIX systems.
    tf.saved_model.save(model, os.path.join("saved", current_time))


def convert(image, label):
    """Cast `image` to float32 scaled into [0, 1]; `label` passes through unchanged."""
    return tf.image.convert_image_dtype(image, tf.float32), label


def augment(image, label, pad_size=40, crop_size=32):
    """Normalize then randomly augment a training image.

    Pipeline: pad (or center-crop) to pad_size x pad_size, random-crop back to
    crop_size x crop_size, random brightness jitter, then random horizontal and
    vertical flips. The sizes are parameterized (defaults 40 -> 32 match the
    32x32 CIFAR-10 images, i.e. 8 pixels of padding) so the same pipeline can
    serve other resolutions; callers using the old two-argument form are
    unaffected.

    Args:
        image: HWC image tensor (3 channels).
        label: class label, returned unchanged.
        pad_size: side length after padding.
        crop_size: side length after the random crop.
    """
    image, label = convert(image, label)
    image = tf.image.resize_with_crop_or_pad(image, pad_size, pad_size)
    image = tf.image.random_crop(image, size=[crop_size, crop_size, 3])
    image = tf.image.random_brightness(image, max_delta=0.5)
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    return image, label


def augment2(image, label):
    """Normalize then randomly augment a 64x64 training image (Tiny ImageNet).

    Pads to 70x70 (6 extra pixels), random-crops back to 64x64, jitters
    brightness, then randomly flips horizontally and vertically.
    """
    image, label = convert(image, label)
    transforms = (
        lambda img: tf.image.resize_with_crop_or_pad(img, 70, 70),
        lambda img: tf.image.random_crop(img, size=[64, 64, 3]),
        lambda img: tf.image.random_brightness(img, max_delta=0.5),
        tf.image.random_flip_left_right,
        tf.image.random_flip_up_down,
    )
    for transform in transforms:
        image = transform(image)
    return image, label


def train_cifar10():
    """Train PlainNet on CIFAR-10 using the custom training loop in `run`."""
    batch_size = 32
    learning_rate = 0.0001

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    # Shuffle buffer covers the full 50k-image training set; augment on the fly.
    train_ds = (
        tf.data.Dataset.from_tensor_slices((x_train, y_train))
        .shuffle(50000, reshuffle_each_iteration=True)
        .map(augment, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
    )
    test_ds = (
        tf.data.Dataset.from_tensor_slices((x_test, y_test))
        .map(convert, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
    )

    run(train_ds, test_ds, PlainNet(), tf.keras.optimizers.Adam(learning_rate), epochs=500)


def train_tiny_image_net():
    """Train DenseNet-121 on Tiny ImageNet using the custom training loop in `run`."""
    batch_size = 50
    learning_rate = 0.0001

    # Training split: full-size shuffle buffer plus 64x64 augmentation pipeline.
    train_dataset = (
        load_data('train')
        .shuffle(100000, reshuffle_each_iteration=True)
        .map(augment2, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
    )
    # Validation split: normalize only, no augmentation.
    test_dataset = (
        load_data('val')
        .map(convert, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
    )

    run(train_dataset, test_dataset, densenet121(), tf.keras.optimizers.Adam(learning_rate))


# Entry point: configure the GPU memory cap first, then train on CIFAR-10.
# Swap the call below for train_tiny_image_net() to train DenseNet-121 instead.
if __name__ == '__main__':
    _set_up()
    train_cifar10()
    # train_tiny_image_net()
