#! /usr/bin/env python3
import pyspark.ml

from gai.v2.base import HookEnabled, LoggingEnabled
from gai.v2.unify.transformer.variant import SparkTransformerWrapper


class IllegalArgumentError(ValueError):
    """Raised when an argument fails validation.

    A :class:`ValueError` subclass used by transformers to signal that
    supplied column names (or other arguments) are invalid.
    """


class SparkTransformer(pyspark.ml.Transformer, LoggingEnabled, HookEnabled):
    """Base class for Spark transformers with logging and hook support.

    Extends ``pyspark.ml.Transformer`` with input/output column
    validation and pre-/post-transform hook execution (provided by
    ``HookEnabled``) plus logging facilities (``LoggingEnabled``).
    """

    def __init__(self):
        # The bases do not appear to use cooperative super() chaining,
        # so each base initializer is invoked explicitly.
        pyspark.ml.Transformer.__init__(self)
        LoggingEnabled.__init__(self)
        HookEnabled.__init__(self)
        # Environment configuration; populated by subclasses/callers.
        self._env_conf = {}

    def _check_input_columns(self, dataset):
        """Check whether all used input columns are in ``dataset.columns``.
        If not, an ``IllegalArgumentError`` is raised.

        Args:
            dataset: the dataset to check

        Raises:
            IllegalArgumentError: if any configured input column is
                missing from ``dataset.columns``

        """
        # Not every transformer declares input columns; nothing to check
        # when the getter is absent or returns None.
        if getattr(self, "getInputCols", None) is None:
            return

        if self.getInputCols() is None:
            return

        if not set(self.getInputCols()) <= set(dataset.columns):
            raise IllegalArgumentError(
                "Class {}: Some of the used columns ({}) are not in the input "
                "dataframe's columns ({})".format(
                    self.__class__.__name__,
                    self.getInputCols(),
                    dataset.columns
                )
            )

    def _check_output_columns(self, dataset):
        """In the case that the transformer appends new column(s) to
        the current dataframe, check whether the names of output columns
        conflict with the dataframe's existing columns. If any conflict
        is found, an ``IllegalArgumentError`` is raised.

        Args:
            dataset: the dataset to check

        Raises:
            IllegalArgumentError: if any configured output column already
                exists in ``dataset.columns``

        """
        if not self._column_appending:
            return

        # Mirror the defensive guards of ``_check_input_columns``: a
        # subclass may declare itself column-appending without exposing
        # (or having configured) output columns.
        if getattr(self, "getOutputCols", None) is None:
            return

        if self.getOutputCols() is None:
            return

        if not set(dataset.columns).isdisjoint(self.getOutputCols()):
            raise IllegalArgumentError(
                "Class {}: output column names ({}) conflict with existing "
                "column names ({})".format(
                    self.__class__.__name__,
                    self.getOutputCols(),
                    dataset.columns
                )
            )

    @property
    def _column_appending(self):
        """Whether this transformer appends new columns to the dataframe.

        Returns:
            boolean indicator whether the intention of the transformer is to
            append one or more new columns besides existing ones
        """
        return False

    def transform(self, dataset, params=None):
        """Validate columns, run hooks, and delegate to Spark's transform.

        Args:
            dataset: the input dataframe
            params: optional param map forwarded to
                ``pyspark.ml.Transformer.transform``

        Returns:
            the transformed dataset

        Raises:
            IllegalArgumentError: if input/output column validation fails
        """
        self._check_input_columns(dataset)
        self._check_output_columns(dataset)

        self._exec_pre_hooks(dataset)
        dataset = super().transform(dataset, params)
        self._exec_post_hooks(dataset)
        return dataset

    def wrap(self, instance_name=None):
        """Wrap this ``pyspark.ml.Transformer`` as a ``v2.Transformer``.

        Args:
            instance_name: optional name forwarded to the wrapper

        Returns:
            this object wrapped in ``SparkTransformerWrapper``
        """
        return SparkTransformerWrapper(self, instance_name=instance_name)
