from pathlib import Path

import tensorflow as tf

from notebook.image_utils import get_label_to_index, get_all_image_paths

# File extension used for all TFRecord outputs under ./data/.
DOT_TFRECORD = '.tfrecord'


def _bytes_feature(value):
    """Wrap a string/bytes value in a tf.train.Feature bytes_list."""
    # An EagerTensor must be unwrapped to its raw bytes first —
    # BytesList won't accept the tensor object directly.
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)


def _float_feature(value):
    """Wrap a single float/double in a tf.train.Feature float_list."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)


def _int64_feature(value):
    """Wrap a single bool/enum/int/uint in a tf.train.Feature int64_list."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)


def _example(feature):
    """Build a tf.train.Example proto from a {name: Feature} dict."""
    features = tf.train.Features(feature=feature)
    return tf.train.Example(features=features)


def save_to_record(image_paths, labels, data_name='train'):
    """Serialize (image, label) pairs into ./data/<data_name>.tfrecord.

    Args:
        image_paths: sequence of paths to image files on disk (JPEG bytes
            are written as-is, without decoding).
        labels: sequence of integer class indices, same length as image_paths.
        data_name: basename of the output record file under ./data/.

    Raises:
        AssertionError: if image_paths and labels differ in length.
    """
    assert len(image_paths) == len(labels)
    with tf.io.TFRecordWriter('./data/' + data_name + DOT_TFRECORD) as writer:
        for image_path, label in zip(image_paths, labels):
            # Fix: the original `open(image_path, 'rb').read()` never closed
            # the file handle; read_bytes() opens and closes it per call.
            image_bytes = Path(image_path).read_bytes()
            feature = {
                "image": _bytes_feature(image_bytes),
                "label": _int64_feature(label),
            }
            example = _example(feature)
            writer.write(example.SerializeToString())


def train(data_root_orig='E:\\Datasets\\Image Classification\\tiny-imagenet-200'):
    """Write the training split to ./data/train.tfrecord.

    Args:
        data_root_orig: root directory of the tiny-imagenet-200 dataset.
            Generalized from a hard-coded path into a defaulted parameter
            so the function is usable on other machines; the default
            preserves the original behavior.
    """
    data_root = Path(data_root_orig)
    label_to_index = get_label_to_index(data_root)
    all_image_paths = get_all_image_paths(data_root, 'train/*/images/*')
    # The class id is the grandparent directory name, matching the glob
    # pattern train/<class>/images/<file>.
    all_image_labels = [label_to_index[Path(path).parent.parent.name] for path in all_image_paths]
    save_to_record(all_image_paths, all_image_labels, 'train')


def val(data_root_orig='E:\\Datasets\\Image Classification\\tiny-imagenet-200'):
    """Write the validation split to ./data/val.tfrecord.

    Args:
        data_root_orig: root directory of the tiny-imagenet-200 dataset.
            Generalized from a hard-coded path into a defaulted parameter;
            the default preserves the original behavior.

    Raises:
        AssertionError: if an annotation line is malformed or names an
            unknown class id.
    """
    data_root = Path(data_root_orig)
    label_to_index = get_label_to_index(data_root)
    all_image_paths = get_all_image_paths(data_root, 'val/images/*')
    # Sort numerically by the trailing index in the filename (e.g.
    # "val_10.JPEG" after "val_9.JPEG"), not lexicographically.
    all_image_paths = sorted(all_image_paths, key=lambda x: int(x.split('_')[-1].split('.')[0]))
    jpeg_to_label = {}
    # Stream the annotations file line by line instead of readlines():
    # same result, no intermediate list.
    with open(data_root / 'val' / 'val_annotations.txt') as f:
        for annotation in f:
            annotation_split = annotation.split()
            # Fields used: [0] filename, [1] class id (must be known).
            # NOTE(review): remaining fields are ignored here — presumably
            # bounding-box data; confirm against the dataset spec.
            assert len(annotation_split) > 1 and annotation_split[1] in label_to_index
            jpeg_to_label[annotation_split[0]] = annotation_split[1]
    all_image_labels = [label_to_index[jpeg_to_label[Path(path).name]] for path in all_image_paths]
    save_to_record(all_image_paths, all_image_labels, 'val')


def load_data(data_name='train'):
    """Load a dataset previously written by save_to_record.

    Returns a tf.data.Dataset yielding (image, label) pairs, where image
    is the decoded 3-channel JPEG tensor and label the stored int64.
    """
    feature_spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    def _decode(serialized):
        # Parse one serialized tf.train.Example, then decode the raw JPEG
        # bytes into an image tensor with 3 channels.
        parsed = tf.io.parse_single_example(serialized, feature_spec)
        image = tf.image.decode_jpeg(parsed['image'], 3)
        # Resizing / [0,1] normalization intentionally left to the caller:
        # image = tf.image.resize(image, [64, 64]); image /= 255.0
        return image, parsed['label']

    record_path = './data/' + data_name + DOT_TFRECORD
    return tf.data.TFRecordDataset(record_path).map(_decode)


if __name__ == '__main__':
    # Only the training split is written by default.
    train()

    # Alternate entry points, kept for convenience:
    # val()
    #
    # Inspection example (NOTE: load_data yields (image, label) tuples,
    # so index the pair, not a dict):
    # dataset = load_data('val').batch(100)
    # for images, labels in dataset.as_numpy_iterator():
    #     print(images)
