"""
## Setup
"""
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing import image_dataset_from_directory

"""
We rescale the images to take values in the range [0, 1].
"""


def scaling(input_image):
    """Map pixel intensities from the [0, 255] range into [0, 1]."""
    return input_image / 255.0


"""
## Crop and resize images
Let's process image data.
First, we convert our images from the RGB color space to the
[YUV colour space](https://en.wikipedia.org/wiki/YUV).
For the input data (low-resolution images),
we crop the image, retrieve the `y` channel (luminance),
and resize it with the `area` method (use `BICUBIC` if you use PIL).
We only consider the luminance channel
in the YUV color space because humans are more sensitive to
luminance change.
For the target data (high-resolution images), we just crop the image
and retrieve the `y` channel.
"""


# Use TF Ops to process.
def process_input(input, input_size, upscale_factor):
    """Build the low-resolution model input from an RGB image batch.

    Converts RGB to YUV, keeps only the luminance (Y) channel — humans are
    most sensitive to luminance changes — and downscales it to
    `(input_size, input_size)` with area interpolation.

    Args:
        input: RGB image tensor, channels-last, values assumed in [0, 1].
        input_size: Target height/width of the low-resolution output.
        upscale_factor: Unused here; kept so the signature mirrors the rest
            of the data pipeline.

    Returns:
        A single-channel luminance tensor resized to (input_size, input_size).
    """
    input = tf.image.rgb_to_yuv(input)
    # Slice out the Y channel directly instead of tf.split, which would
    # also materialize the U and V channels only to discard them.
    y = input[..., 0:1]
    return tf.image.resize(y, [input_size, input_size], method="area")


def process_target(input):
    """Build the high-resolution training target from an RGB image batch.

    Converts RGB to YUV and returns only the luminance (Y) channel at the
    original resolution (no resizing).

    Args:
        input: RGB image tensor, channels-last, values assumed in [0, 1].

    Returns:
        A single-channel luminance tensor with the same spatial size.
    """
    input = tf.image.rgb_to_yuv(input)
    # Slice out the Y channel directly; splitting into y/u/v and discarding
    # two of the three channels is wasted work.
    return input[..., 0:1]


def get_datasets(crop_size=300, upscale_factor=3, batch_size=8):
    """
    ## Load data: BSDS500 dataset
    ### Download dataset
    We use the built-in `keras.utils.get_file` utility to retrieve the dataset,
    then build (training, validation) `tf.data` pipelines of
    (low-resolution luminance, high-resolution luminance) pairs.
    """

    dataset_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
    data_dir = keras.utils.get_file(origin=dataset_url, fname="BSR", untar=True)
    root_dir = os.path.join(data_dir, "BSDS500/data")

    # Low-resolution side length implied by the desired upscaling factor.
    input_size = crop_size // upscale_factor

    def build_split(subset):
        """Create one resized/scaled/paired dataset for the given subset."""
        ds = image_dataset_from_directory(
            root_dir,
            batch_size=batch_size,
            image_size=(crop_size, crop_size),
            validation_split=0.2,
            subset=subset,
            seed=1337,
            label_mode=None,
        )
        # Scale from (0, 255) to (0, 1), then pair each image with its
        # low-resolution input and full-resolution luminance target.
        ds = ds.map(scaling)
        ds = ds.map(
            lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
        )
        return ds.prefetch(buffer_size=32)

    # Same seed + split fraction on both calls keeps the two subsets disjoint.
    train_ds = build_split("training")
    valid_ds = build_split("validation")

    return train_ds, valid_ds
