#!/usr/bin/env python
# coding: utf8

"""
    Module for building data preprocessing pipeline using the tensorflow
    data API. Data preprocessing such as audio loading, spectrogram
    computation, cropping, feature caching or data augmentation is done
    using a tensorflow dataset object that output a tuple (input_, output)
    where:

    -   input is a dictionary with a single key that contains the (batched)
        mix spectrogram of audio samples
    -   output is a dictionary of spectrogram of the isolated tracks
        (ground truth)
"""

import os
import time
from os.path import exists, sep as SEPARATOR

import numpy as np
# pylint: disable=import-error
import tensorflow as tf
from PIL import Image

from .utils.logging import get_logger
from .utils.tensor import (dataset_from_csv, set_tensor_shape)


# pylint: enable=import-error

def get_training_dataset(params, data_path):
    """ Builds training dataset.

    :param params: parameters.
    :param data_path: Path of directory containing data.
    :returns: Built dataset.
    """
    seed = params.get('random_seed', 0)
    builder = DatasetBuilder(params, data_path, random_seed=seed)
    return builder.build(
        params.get('train_csv'),
        wait_for_cache=False,
        batch_size=params.get('batch_size'),
        cache_directory=params.get('training_cache'))


def get_validation_dataset(params, data_path):
    """ Builds validation dataset.

    :param params: Audio parameters.
    :param data_path: Path of directory containing data.
    :returns: Built dataset.
    """
    # Validation runs a single, deterministic pass over the data.
    return DatasetBuilder(params, data_path).build(
        params.get('validation_csv'),
        shuffle=False,
        infinite_generator=False,
        batch_size=params.get('batch_size'),
        cache_directory=params.get('validation_cache'))


class DatasetBuilder(object):
    """ Builds tf.data pipelines yielding (input_, output) pairs of image
    and ground-truth mask tensors described by a CSV file.

    By convention the first entry of ``params['instrument_list']`` names
    the input image and the second names the mask.
    """

    # Wait period for cache (in seconds).
    WAIT_PERIOD = 60

    def __init__(self, params, data_path, random_seed=0):
        """ Default constructor.

        :param params: Parameter dict; must provide 'instrument_list',
            'H', 'W' and 'n_channels'.
        :param data_path: Root directory relative sample paths are joined
            against.
        :param random_seed: Seed used when shuffling the dataset.
        """
        self._params = params
        self._data_path = data_path
        self._train_dir = params.get('train_dir')
        self._instruments = params['instrument_list']
        self._instrument_builders = None
        # First "instrument" is the input image, second the target mask.
        self._image_name = self._instruments[0]
        self._mask_name = self._instruments[1]
        self._H = params['H']
        self._W = params['W']
        self._n_channels = params['n_channels']
        self._data_augmentation = params.get('data_augmentation')
        self._random_seed = random_seed

    def map_features(self, sample):
        """ Select features and annotation of the given sample.

        :param sample: Sample dict holding '<image>_raw' and '<mask>_raw'.
        :returns: (input_, output) pair of single-key dicts.
        """
        input_ = {
            f'{self._image_name}_raw':
                sample[f'{self._image_name}_raw']}
        output = {
            f'{self._mask_name}_raw':
                sample[f'{self._mask_name}_raw']}
        return input_, output

    def expand_path(self, sample):
        """ Prefixes each '<instrument>_path' entry with the data root. """
        return dict(sample, **{f'{instrument}_path': tf.strings.join(
            (self._data_path, sample[f'{instrument}_path']), SEPARATOR)
            for instrument in self._instruments})

    def load_image_raw(self, image_descriptor, size, name):
        """ Builds a tensor lazily loading and normalizing an input image.

        :param image_descriptor: String tensor holding the image path.
        :param size: Target (H, W) pair.
            NOTE(review): PIL ``Image.resize`` expects (width, height) —
            confirm H == W or that the order is intended.
        :param name: Key the loaded tensor is returned under.
        :returns: Dict {name: float32 image tensor scaled to [0, 1]}.
        """
        def safe_load(path, shape):
            image = Image.open(path.numpy())
            image = image.resize(shape.numpy())
            img_raw = np.array(image)
            # Grayscale images decode as rank-2; add a channel axis.
            if img_raw.ndim == 2:
                img_raw = np.expand_dims(img_raw, -1)
            # Normalize to [0, 1]; the division yields float64, so cast
            # unconditionally to the float32 the pipeline declares.
            img_raw = (img_raw / 255.0).astype(np.float32)
            get_logger().info(
                'load image %s successful, image shape: %s',
                path, img_raw.shape)
            return img_raw

        result = tf.py_function(
            safe_load,
            [image_descriptor, size],
            tf.float32
        )
        return {name: result}

    def load_mask_raw(self, image_descriptor, size, name):
        """ Builds a tensor lazily loading a ground-truth mask.

        :param image_descriptor: String tensor holding the mask path.
        :param size: Target (H, W) pair (same resize caveat as images).
        :param name: Key the loaded tensor is returned under.
        :returns: Dict {name: uint8 mask tensor}.
        """
        def safe_load(path, shape):
            mask = Image.open(path.numpy())
            mask = mask.resize(shape.numpy())
            mask_raw = np.array(mask)
            # Shift labels down by one (stored 1-based on disk, presumably).
            # NOTE(review): on uint8 data a stored 0 wraps to 255 — confirm
            # masks never contain 0.
            mask_raw = mask_raw - 1
            if mask_raw.ndim == 2:
                mask_raw = np.expand_dims(mask_raw, -1)
            get_logger().info(
                'load mask %s successful, mask shape: %s, datatype: %s',
                path, mask_raw.shape, mask_raw.dtype)
            return mask_raw

        result = tf.py_function(
            safe_load,
            [image_descriptor, size],
            tf.uint8
        )
        return {name: result}

    def random_data_augment(self, sample):
        """ Data augmentation hook; currently a no-op pass-through.

        Returning the sample unchanged keeps ``dataset.map`` valid when
        ``random_data_augmentation`` is enabled (the previous bare ``pass``
        returned None and would break the pipeline). The original
        time-stretch augmentation is kept below for reference.
        """
        # return dict(sample, **sync_apply({
        #     f'{instrument}_raw':
        #         sample[f'{instrument}_raw']
        #     for instrument in self._instruments},
        #     lambda x: random_time_stretch(
        #         x, factor_min=0.9, factor_max=1.1)))
        return sample

    def cache(self, dataset, cache, wait):
        """ Cache the given dataset if cache is enabled. Eventually waits for
        cache to be available (useful if another process is already computing
        cache) if provided wait flag is True.

        :param dataset: Dataset to be cached if cache is required.
        :param cache: Path of cache directory to be used, None if no cache.
        :param wait: If caching is enabled, True if cache should be waited.
        :returns: Cached dataset if needed, original dataset otherwise.
        """
        if cache is not None:
            if wait:
                # Poll until another process finishes writing the cache.
                while not exists(f'{cache}.index'):
                    get_logger().info(
                        'Cache not available, wait %s',
                        self.WAIT_PERIOD)
                    time.sleep(self.WAIT_PERIOD)
            cache_path = os.path.split(cache)[0]
            os.makedirs(cache_path, exist_ok=True)
            return dataset.cache(cache)
        return dataset

    @property
    def instruments(self):
        """ Instrument dataset builder generator.

        :yield InstrumentBuilder instance.
        """
        if self._instrument_builders is None:
            self._instrument_builders = []
            for instrument in self._instruments:
                self._instrument_builders.append(
                    InstrumentDatasetBuilder(self, instrument))
        for builder in self._instrument_builders:
            yield builder

    def build(
            self,
            train_path,
            batch_size=8,
            shuffle=True,
            infinite_generator=True,
            cache_directory=None,
            wait_for_cache=False,
            random_data_augmentation=False,
            num_parallel_calls=4):
        """ Builds the full preprocessing dataset.

        :param train_path: Path of the CSV file describing samples.
        :param batch_size: Size of the output batches.
        :param shuffle: If True, shuffle samples before loading.
        :param infinite_generator: If True, repeat the dataset forever.
        :param cache_directory: Optional cache path (see :meth:`cache`).
        :param wait_for_cache: If True, wait for cache availability.
        :param random_data_augmentation: If True, apply augmentation map.
        :param num_parallel_calls: Parallelism for pre-cache map stages.
        :returns: Built tf.data.Dataset of (input_, output) batches.
        """
        dataset = dataset_from_csv(train_path)
        if shuffle:
            dataset = dataset.shuffle(
                buffer_size=40000,
                seed=self._random_seed,
                # useless since it is cached :
                reshuffle_each_iteration=True)

        dataset = dataset.map(self.expand_path)
        N = num_parallel_calls

        # Load and statically shape each instrument's tensor.
        for instrument in self.instruments:
            dataset = (
                dataset
                    .map(instrument.load_image, num_parallel_calls=N)
                    .map(instrument.reshape, num_parallel_calls=N)
            )
        dataset = self.cache(dataset, cache_directory, wait_for_cache)
        if infinite_generator:
            dataset = dataset.repeat(count=-1)
        dataset = dataset.map(self.map_features)
        M = 8  # Parallel call post caching.
        if random_data_augmentation:
            dataset = (
                dataset
                    .map(self.random_data_augment, num_parallel_calls=M))
        # Make batch (done after selection to avoid
        # error due to unprocessed instrument spectrogram batching).
        dataset = dataset.batch(batch_size)
        return dataset


class InstrumentDatasetBuilder(object):
    """ Per-instrument mapper provider: supplies the load and reshape
    dataset transformations for one instrument key.
    """

    def __init__(self, parent, instrument):
        """ Default constructor.

        :param parent: Parent dataset builder (supplies sizes and loaders).
        :param instrument: Target instrument.
        """
        self._parent = parent
        self._instrument = instrument
        self._image_raw_key = f'{instrument}_raw'

    def reshape(self, sample):
        """ Pins the static shape of this instrument's raw tensor. """
        parent = self._parent
        # The input image keeps its channel count; masks are single-channel.
        depth = parent._n_channels if self._instrument == 'image' else 1
        shaped = set_tensor_shape(
            sample[self._image_raw_key],
            (parent._H, parent._W, depth))
        return dict(sample, **{self._image_raw_key: shaped})

    def load_image(self, sample):
        """ Loads the image or mask tensor for the given sample. """
        parent = self._parent
        loader = (
            parent.load_image_raw
            if self._instrument == 'image'
            else parent.load_mask_raw)
        loaded = loader(
            sample[f'{self._instrument}_path'],
            (parent._H, parent._W),
            name=self._image_raw_key)
        return dict(sample, **loaded)