#!/usr/bin/env python
# coding: utf8

""" This package provide an estimator builder as well as model functions. """

import importlib

# pylint: disable=import-error
import tensorflow as tf

from .metrics import mean_iou, dice_coefficient

# pylint: enable=import-error

__email__ = 'research@deezer.com'
__author__ = 'Deezer Research'
__license__ = 'MIT License'

placeholder = tf.compat.v1.placeholder


def get_model_function(model_type):
    """
        Get tensorflow function of the model to be applied to the input tensor.
        For instance "unet.softmax_unet" will return the softmax_unet function
        in the "unet.py" submodule of the current module (segment.model).

        Params:
        - model_type: str
        the relative module path to the model function, i.e.
        "<submodule path>.<function name>". A bare function name (no dot)
        is looked up directly in the "functions" subpackage.

        Returns:
        A tensorflow function to be applied to the input tensor to get the
        multitrack output.

        Raises:
        - ModuleNotFoundError: if the referenced submodule does not exist.
        - AttributeError: if the submodule has no such function.
    """
    # Split "<package path>.<function name>" at the LAST dot only.
    relative_path_to_module, _, model_name = model_type.rpartition('.')
    main_module = '.'.join((__name__, 'functions'))
    if relative_path_to_module:
        path_to_module = f'{main_module}.{relative_path_to_module}'
    else:
        # No package part: previously this produced a path with a trailing
        # dot ("...functions."), which always failed with a confusing
        # error. Look the function up directly in the functions package.
        path_to_module = main_module
    module = importlib.import_module(path_to_module)
    model_function = getattr(module, model_name)
    return model_function


class InputProvider(object):
    """ Abstract interface describing how model inputs are declared and fed.

    Concrete subclasses define which placeholders the model consumes and
    how raw data values are mapped onto them at feed time.
    """

    def __init__(self, params):
        # Hyperparameter dictionary shared with the estimator builder.
        self.params = params

    def get_input_dict_placeholders(self):
        """ Return a dict mapping input names to their placeholders. """
        raise NotImplementedError()

    @property
    def input_names(self):
        """ Names of the inputs exposed by this provider. """
        raise NotImplementedError()

    def get_feed_dict(self, features, *args):
        """ Build a feed dict mapping placeholders to raw values. """
        raise NotImplementedError()

    def get_feed_tensor(self, features):
        """ Return the tensor carrying the raw input. """
        raise NotImplementedError()


class ImageInputProvider(InputProvider):
    """ Input provider feeding a raw image tensor under the "image_raw" key. """

    def __init__(self, params):
        """
        :param params: Hyperparameter dict; must contain 'H', 'W' and
            'n_channels' describing the input image shape.
        """
        super().__init__(params)
        self.image_input_name = "image_raw"

    @property
    def input_names(self):
        # Previous version returned the hard-coded literal "image_raw"
        # alongside self.image_input_name (set to that same literal),
        # yielding a duplicated entry. Return each input name once.
        return [self.image_input_name]

    def get_input_dict_placeholders(self):
        """ Create the (batch, H, W, n_channels) float32 image placeholder.

        :returns: Dict mapping the image input name to its placeholder.
        """
        shape = (None, self.params['H'], self.params['W'], self.params['n_channels'])
        features = {
            self.image_input_name: placeholder(tf.float32,
                                               shape=shape,
                                               name=self.image_input_name)
        }
        return features

    def get_feed_dict(self, features, image_raw):
        """ Map the raw image value onto its placeholder. """
        return {features[self.image_input_name]: image_raw}

    def get_feed_tensor(self, features):
        """ Return the placeholder tensor holding the raw image. """
        return features[self.image_input_name]


class InputProviderFactory(object):
    """ Factory returning the input provider matching given parameters. """

    @staticmethod
    def get(params):
        """
        :param params: Hyperparameter dict forwarded to the provider.
        :returns: An ImageInputProvider (currently the only provider).
        """
        provider = ImageInputProvider(params)
        return provider


class EstimatorSpecBuilder(object):
    """ A builder class that builds a `tf.estimator.EstimatorSpec` for an
    image-segmentation model. The built spec behaves differently depending
    on the estimator mode:

    * In train mode:    the spec carries the loss, a training op and
                        evaluation metrics; labels must provide the
                        ground-truth mask under 'mask_raw'.
    * In eval mode:     the spec carries the loss and evaluation metrics.
    * In predict mode:  the spec exposes the raw model outputs as
                        predictions.

    :Example:

    >>> from segment.model import EstimatorSpecBuilder
    >>> builder = EstimatorSpecBuilder(features, params)
    >>> builder.build_predict_model()
    >>> builder.build_evaluation_model(labels)
    >>> builder.build_train_model(labels)

    >>> from segment.model import model_fn
    >>> estimator = tf.estimator.Estimator(model_fn=model_fn, ...)
    """

    # Default model function, resolved through get_model_function().
    DEFAULT_MODEL = 'image_unet.image_unet'

    # Supported loss functions.
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'

    # Supported optimizers (any other value falls back to Adam).
    ADADELTA = 'Adadelta'
    SGD = 'SGD'

    # Math constants.
    # NOTE(review): neither constant below is referenced anywhere in this
    # class — presumably leftovers from the spectrogram pipeline this code
    # was adapted from; confirm before removing.
    WINDOW_COMPENSATION_FACTOR = 2. / 3.
    EPSILON = 1e-10

    def __init__(self, features, params):
        """ Default constructor.

        :param features: The input features for the estimator; a dict
            expected to hold the raw input image under the
            '<image instrument>_raw' key (see `image_raw_name`).
        :param params: Hyperparameters as a dictionary; must provide
            'instrument_list' with at least two entries, interpreted here
            as (image name, mask name).
        """

        self._features = features
        self._params = params
        # Get instrument name.
        self._instruments = params['instrument_list']
        self._image = self._instruments[0]
        self._mask = self._instruments[1]

    def _build_model_outputs(self):
        """ Apply the configured model function to the input image tensor
        and cache the result in `self._model_outputs`.

        The model type comes from params['model']['type'] when present,
        falling back to DEFAULT_MODEL.

        :raise ValueError: If required model_type is not supported.
        """

        input_tensor = self.image_feature
        model = self._params.get('model', None)
        if model is not None:
            model_type = model.get('type', self.DEFAULT_MODEL)
        else:
            model_type = self.DEFAULT_MODEL
        try:
            apply_model = get_model_function(model_type)
        except ModuleNotFoundError:
            raise ValueError(f'No model function {model_type} found')
        # NOTE(review): this unconditionally reads params['model']['params'],
        # which raises KeyError when 'model' is absent even though the model
        # type defaulted above — confirm 'model' is always configured.
        self._model_outputs = apply_model(
            input_tensor,
            self._params['model']['params'])

    def create_mask(self, pred_mask):
        # Collapse per-class scores to class indices along the last axis,
        # then restore a trailing singleton channel dimension.
        pred_mask = tf.argmax(pred_mask, axis=-1)
        pred_mask = pred_mask[..., tf.newaxis]
        return pred_mask

    def _build_loss(self, labels):
        """ Construct tensorflow loss and metrics from the model outputs.

        :param labels: dictionary of target outputs; must hold the ground
            truth mask tensor under the 'mask_raw' key.
        :returns: tensorflow (loss, metrics) tuple, where metrics is a dict
            of (value_op, update_op) pairs suitable for eval_metric_ops.
        :raise ValueError: If params['loss_type'] is not supported.
        """
        output = self.model_outputs
        output_shape = tf.shape(output)
        mask_raw = labels['mask_raw']
        # mask_shape = tf.shape(mask_raw)
        # mask_raw = tf.image.resize_with_crop_or_pad(mask_raw, output_shape[1], output_shape[2])
        # mask_raw = tf.reshape(mask_raw, (mask_shape[0], output_shape[1], output_shape[2], mask_shape[3]))
        loss_type = self._params.get('loss_type', self.SIGMOID)
        if loss_type == self.SIGMOID:
            # Binary segmentation: output is presumably per-pixel
            # probabilities (binary_crossentropy expects probabilities,
            # not logits) — TODO confirm against the model function.
            losses = tf.reduce_mean(tf.keras.losses.binary_crossentropy(mask_raw, output))
            # Hard 0/1 predictions at the 0.5 threshold for the metrics.
            output_predictions = tf.where(output < 0.5, x=tf.zeros_like(output), y=tf.ones_like(output))
            metrics = dict({'accuracy': tf.compat.v1.metrics.accuracy(mask_raw, output_predictions, name='acc_op'),
                            'iou': tf.compat.v1.metrics.mean_iou(mask_raw, output_predictions, num_classes=2,
                                                                 name='iou_op')})
        elif loss_type == self.SOFTMAX:
            # Multi-class segmentation with integer labels in mask_raw.
            losses = tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(mask_raw, output))
            output_predictions = self.create_mask(output)
            # NOTE(review): num_classes is hard-coded to 3 here — confirm
            # it should not come from the model parameters instead.
            metrics = dict(
                {'accuracy': tf.compat.v1.metrics.mean_per_class_accuracy(mask_raw, output_predictions, num_classes=3,
                                                                          name='acc_op'),
                 'iou': tf.compat.v1.metrics.mean_iou(mask_raw, output_predictions, num_classes=3, name='iou_op')})
        else:
            raise ValueError(f"Unkwnown loss type: {loss_type}")
        return losses, metrics

    def _build_optimizer(self):
        """ Builds an optimizer instance from internal parameter values.

        Default to AdamOptimizer if not specified. Note that Adadelta is
        built with its default rate and ignores params['learning_rate'].

        :returns: Optimizer instance from internal configuration.
        """
        name = self._params.get('optimizer')
        if name == self.ADADELTA:
            return tf.compat.v1.train.AdadeltaOptimizer()
        rate = self._params['learning_rate']
        if name == self.SGD:
            return tf.compat.v1.train.GradientDescentOptimizer(rate)
        return tf.compat.v1.train.AdamOptimizer(rate)

    @property
    def instruments(self):
        """ Instrument names from params['instrument_list']. """
        return self._instruments

    @property
    def image_raw_name(self):
        """ Feature-dict key holding the raw input image. """
        return f'{self._image}_raw'

    @property
    def model_outputs(self):
        """ Model output tensor, built lazily on first access. """
        if not hasattr(self, "_model_outputs"):
            self._build_model_outputs()
        return self._model_outputs

    @property
    def outputs(self):
        """ Prediction outputs, built lazily on first access. """
        if not hasattr(self, "_outputs"):
            self._build_outputs()
        return self._outputs

    @property
    def image_feature(self):
        """ Raw input image tensor taken from the features dict. """
        return self._features[self.image_raw_name]

    def _build_outputs(self):
        # Predictions are currently the raw model outputs, unchanged.
        output = self.model_outputs
        self._outputs = output

    def build_predict_model(self):
        """ Builder interface for creating a model instance that aims to
        perform prediction / inference. The spec's predictions are the raw
        model outputs (see `_build_outputs`).

        :returns: An estimator spec for performing prediction.
        """

        return tf.estimator.EstimatorSpec(
            tf.estimator.ModeKeys.PREDICT,
            predictions=self.outputs)

    def build_evaluation_model(self, labels):
        """ Builder interface for creating a model instance that aims to
        perform model evaluation, exposing the loss and the accuracy/IoU
        metrics built by `_build_loss`.

        :param labels: Model labels; must hold the ground truth mask
            under the 'mask_raw' key.
        :returns: An estimator spec for performing model evaluation.
        """
        loss, metrics = self._build_loss(labels)
        return tf.estimator.EstimatorSpec(
            tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops=metrics)

    def build_train_model(self, labels):
        """ Builder interface for creating a model instance that aims to
        perform model training, exposing the loss, a minimize op on the
        global step, and the accuracy/IoU metrics built by `_build_loss`.

        :param labels: Model labels; must hold the ground truth mask
            under the 'mask_raw' key.
        :returns: An estimator spec for performing model training.
        """
        loss, metrics = self._build_loss(labels)
        optimizer = self._build_optimizer()
        train_operation = optimizer.minimize(
            loss=loss,
            global_step=tf.compat.v1.train.get_global_step())
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=loss,
            train_op=train_operation,
            eval_metric_ops=metrics,
        )


def model_fn(features, labels, mode, params, config):
    """ Estimator entry point: dispatch to the spec builder for `mode`.

    :param features: Input features dict forwarded to the builder.
    :param labels: Ground-truth labels (ignored in predict mode).
    :param mode: Estimator mode.
    :param params: Hyperparameter dict forwarded to the builder.
    :param config: TF configuration (not used).
    :returns: Built EstimatorSpec.
    :raise ValueError: If estimator mode is not supported.
    """
    builder = EstimatorSpecBuilder(features, params)
    modes = tf.estimator.ModeKeys
    if mode == modes.TRAIN:
        return builder.build_train_model(labels)
    if mode == modes.EVAL:
        return builder.build_evaluation_model(labels)
    if mode == modes.PREDICT:
        return builder.build_predict_model()
    raise ValueError(f'Unknown mode {mode}')
