import numpy as np
import tensorflow.compat.v1 as tf

from convert_cifar10_to_tfrecord import load_data

# Dimensions of a single CIFAR-10 image; images are stored channel-first
# (DEPTH, HEIGHT, WIDTH) in the raw batches and converted to channels-last
# by the loaders below.
HEIGHT = 32
WIDTH = 32
DEPTH = 3

# Number of object categories in CIFAR-10.
NUM_CLASSES = 10

# Directory containing the extracted python-version CIFAR-10 batch files.
dataset_dir = '../data/cifar-10-batches-py'

# Pickled training batch files, loaded in order by load_data().
train_filenames = [
    'data_batch_1',
    'data_batch_2',
    'data_batch_3',
    'data_batch_4',
    'data_batch_5'
]

# Single pickled evaluation batch.
test_filenames = [
    'test_batch'
]


def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors.

    Args:
        labels_dense: integer ndarray of class indices; flattened before use,
            so a (N,) or (N, 1) array both work.
        num_classes: width of each one-hot row.

    Returns:
        Float ndarray of shape (labels_dense.shape[0], num_classes) with a
        single 1 per row at the label's position.
    """
    num_labels = labels_dense.shape[0]
    one_hot = np.zeros((num_labels, num_classes))
    # Row/column fancy indexing: row i gets a 1 in column labels[i].
    one_hot[np.arange(num_labels), labels_dense.ravel()] = 1
    return one_hot


def _to_nhwc_split(images, labels):
    """Reshape flat CHW image rows to NHWC and one-hot encode the labels."""
    images = images.reshape((-1, DEPTH, HEIGHT, WIDTH))
    images = images.transpose(0, 2, 3, 1)
    labels = dense_to_one_hot(np.array(labels), NUM_CLASSES)
    return images, labels


def get_data():
    """Load CIFAR-10 from the pickled batch files as numpy arrays.

    Returns:
        Tuple (train_images, train_labels, test_images, test_labels) where
        images have shape (N, HEIGHT, WIDTH, DEPTH) and labels are one-hot
        arrays of shape (N, NUM_CLASSES).
    """
    train_images, train_labels, test_images, test_labels = load_data(
        dataset_dir, train_filenames, test_filenames)

    # Use the module constants instead of repeating the magic numbers
    # 3/32/32/10 for each split.
    train_images, train_labels = _to_nhwc_split(train_images, train_labels)
    test_images, test_labels = _to_nhwc_split(test_images, test_labels)

    return train_images, train_labels, test_images, test_labels


def input_fn(filename, batch_size=1000, shuffle=False, shuffle_buffer=10000):
    """Build an Estimator-style input_fn reading CIFAR-10 TFRecords.

    Args:
        filename: path (or list of paths) of TFRecord file(s) holding
            serialized examples with 'image' (raw bytes) and 'label' (int64)
            features.
        batch_size: number of examples per batch.
        shuffle: whether to shuffle examples before batching.
        shuffle_buffer: shuffle buffer size (previously hard-coded to 10000).

    Returns:
        A zero-argument callable returning (images, labels) tensors: images
        are float32 of shape (batch, HEIGHT, WIDTH, DEPTH), labels are
        one-hot with depth NUM_CLASSES. The dataset repeats indefinitely.
    """
    def _parser(record):
        # Use the tf.io namespace consistently (the original mixed v1
        # top-level tf.FixedLenFeature / tf.decode_raw with
        # tf.io.parse_single_example).
        parsed = tf.io.parse_single_example(record, features={
            'image': tf.io.FixedLenFeature((), tf.string),
            'label': tf.io.FixedLenFeature((), tf.int64)
        })
        image = tf.io.decode_raw(parsed['image'], tf.uint8)
        image.set_shape([DEPTH * HEIGHT * WIDTH])
        # Reshape [depth * height * width] -> [depth, height, width], then
        # transpose to channels-last [height, width, depth].
        image = tf.cast(
            tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
            tf.float32)
        label = tf.cast(parsed['label'], tf.int32)
        return image, tf.one_hot(label, depth=NUM_CLASSES)

    def _input_fn():
        dataset = tf.data.TFRecordDataset(filename)
        dataset = dataset.map(_parser)
        if shuffle:
            dataset = dataset.shuffle(buffer_size=shuffle_buffer)
        dataset = dataset.repeat()
        dataset = dataset.batch(batch_size)
        # Overlap input preprocessing with model execution.
        dataset = dataset.prefetch(1)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return features, labels

    return _input_fn
