#! /usr/bin/env python3

import pyspark.ml
from pyspark.ml.feature import HasOutputCols, HasInputCols, HasOutputCol
from pyspark.ml.param import Params, Param
from pyspark.sql import DataFrame
from pyspark.sql.types import ArrayType, MapType, StringType, StructType, StructField, Row

from gai.v2.spark.base import SparkTransformer
from gai.v2.utils import generate_unique_str, get_or_create_spark_session


class MultiColumnMapper(SparkTransformer, HasInputCols, HasOutputCols):
    """Given a multi-ary (of one or more operands) function, a list of input column
    names, and a list of output column names, an instance of ``MultiColumnMapper``
    can apply the function on the input columns of each row, and append the result
    as one or more columns.

    Args:
        fun: ``UserDefinedFunction`` whose return type is either ``ArrayType()``
            or ``MapType()`` or ``StructType()``
        inputCols: list of input column names, in the order of the parameters of ``fun``
        outputCols: list of output column names, in accordance with the return type of ``fun``

    Notes:
        The arity of ``fun`` should match the length of ``inputCols``. The dimension
        of the return type of ``fun`` should match the length of ``outputCols``.

    >>> from pyspark.sql import SparkSession
    >>> from pyspark.sql.functions import udf
    >>> from pyspark.sql.types import FloatType, ArrayType
    >>> spark = get_or_create_spark_session()
    >>> df = spark.createDataFrame([(2000.0, 11), (1000.0, 12),
    ...                             (3000.0, 20)], schema=("foo", "bar"))
    >>> def plus_a(x, y):
    ...     return [x + y]
    >>> plus_op = udf(plus_a, ArrayType(FloatType()))
    >>> col_mapper = MultiColumnMapper(fun=plus_op,
    ...                                inputCols=["foo", "bar"],
    ...                                outputCols=["foobar"])
    >>> col_mapper.transform(df).show()
    +------+---+------+
    |   foo|bar|foobar|
    +------+---+------+
    |2000.0| 11|2011.0|
    |1000.0| 12|1012.0|
    |3000.0| 20|3020.0|
    +------+---+------+
    <BLANKLINE>
    >>> def plain_minus(x, y):
    ...     return x - y
    ...
    >>> @udf(returnType=MapType(StringType(), FloatType()))
    ... def minus_op(x,y):
    ...     return {"result": plain_minus(x,y)}
    >>> col_mapper = MultiColumnMapper(fun=minus_op,
    ...                                inputCols=["foo", "bar"],
    ...                                outputCols=["result"])
    >>> col_mapper.transform(df).show()
    +------+---+------+
    |   foo|bar|result|
    +------+---+------+
    |2000.0| 11|1989.0|
    |1000.0| 12| 988.0|
    |3000.0| 20|2980.0|
    +------+---+------+
    <BLANKLINE>
    >>> @udf(returnType=StructType(fields=[StructField("sum", FloatType()),
    ...                                    StructField("diff", FloatType())]))
    ... def complex_op(x, y):
    ...     return Row(sum=x+y, diff=x-y)
    ...
    >>> col_mapper = MultiColumnMapper(fun=complex_op,
    ...                                inputCols=["foo", "bar"],
    ...                                outputCols=["diff", "sum"])
    >>> col_mapper.transform(df).show()
    +------+---+------+------+
    |   foo|bar|  diff|   sum|
    +------+---+------+------+
    |2000.0| 11|1989.0|2011.0|
    |1000.0| 12| 988.0|1012.0|
    |3000.0| 20|2980.0|3020.0|
    +------+---+------+------+
    <BLANKLINE>
    >>>
    """

    # Spark ML Param holding the mapping UDF; stored via the Params machinery
    # so it participates in copy()/extractParamMap() like built-in params.
    fun = Param(Params._dummy(), "fun", "user-defined function used to transform a record")

    def __init__(self, fun, inputCols, outputCols):
        """Initializes the transformer with its function and column mappings.

        Args:
            fun: ``UserDefinedFunction`` whose return type is either ``ArrayType()``
                or ``MapType()`` or ``StructType()``
            inputCols: list of input column names, in the order of the parameters of ``fun``
            outputCols: list of output column names, in accordance with the return type of ``fun``
        """
        super(MultiColumnMapper, self).__init__()
        self.setInputCols(inputCols)
        self.setOutputCols(outputCols)
        self.setFun(fun)

    def setFun(self, fun):
        """Sets the transformation function.

        Args:
            fun: ``UserDefinedFunction`` whose return type is either ``ArrayType()``
                or ``MapType()`` or ``StructType()``

        Returns:
            ``self``

        Raises:
            RuntimeError: if the return type of ``fun`` is not one of
                ``ArrayType``, ``MapType``, or ``StructType``.
        """
        # Validate eagerly so a mis-typed UDF fails at construction time
        # rather than inside _transform.
        if not isinstance(fun.returnType, (ArrayType, MapType, StructType)):
            raise RuntimeError(
                "A UDF with returnType being one of ArrayType, MapType "
                "and StructType is expected.")
        self._paramMap[self.fun] = fun
        return self

    def getFun(self):
        """
        Returns:
            a user-defined function used to transform a record
        """
        return self.getOrDefault(self.fun)

    @staticmethod
    def _compute_output_selectors(fun, dataset, tmpCol, outputCols):
        """Builds ``(output column name, column expression)`` pairs that extract
        each output column from the temporary composite column ``tmpCol``,
        according to the return type of ``fun``.

        Args:
            fun: the mapping ``UserDefinedFunction``
            dataset: dataframe that already contains the composite column
            tmpCol: name of the composite column produced by ``fun``
            outputCols: output column names to extract

        Returns:
            list of ``(name, Column)`` tuples

        Raises:
            RuntimeError: if ``fun``'s return type is none of the supported types.
        """
        if isinstance(fun.returnType, ArrayType):
            # Arrays are positional: pair output names with element indices.
            output_selectors = [(col, dataset[tmpCol].getItem(index)) for index, col in enumerate(outputCols)]
        elif isinstance(fun.returnType, MapType):
            # Maps are keyed by the output column name itself.
            output_selectors = [(col, dataset[tmpCol].getItem(col)) for col in outputCols]
        elif isinstance(fun.returnType, StructType):
            # Structs expose named fields matching the output column names.
            output_selectors = [(col, dataset[tmpCol].getField(col)) for col in outputCols]
        else:
            raise RuntimeError("unexpected UserDefinedFunction")

        return output_selectors

    @property
    def _column_appending(self):
        # This transformer appends columns rather than replacing the dataframe.
        return True

    def _transform(self, dataset: DataFrame):
        """Applies the UDF to the input columns and appends the output columns.

        Args:
            dataset: input dataframe containing all of ``inputCols``

        Returns:
            the dataframe with ``outputCols`` appended
        """
        # Collect the composite UDF result into a uniquely named temp column,
        # then fan it out into the requested output columns.
        tmp_col = generate_unique_str(dataset.columns)
        input_selectors = [dataset[col] for col in self.getInputCols()]
        dataset = dataset.withColumn(tmp_col, self.getFun()(*input_selectors))

        output_selectors = MultiColumnMapper._compute_output_selectors(self.getFun(), dataset, tmp_col,
                                                                       self.getOutputCols())
        for col, selector in output_selectors:
            dataset = dataset.withColumn(col, selector)
        dataset = dataset.drop(tmp_col)
        return dataset


class ColumnMapper(SparkTransformer, HasInputCols, HasOutputCol):
    """Given a fixed mapping function (``fun``), a fixed list of ``inputCols``,
    and a fixed ``outputCol``, a ``ColumnMapper`` can transform the ``inputCols`` to
    ``outputCol`` via ``fun`` and append the result column to the original
    dataframe.


    Args:
        fun: ``UserDefinedFunction``
        inputCols: list of input column names, in the order of the parameters of ``fun``
        outputCol: output column name


    >>> from pyspark.sql.functions import udf
    >>> from pyspark.sql.types import FloatType
    >>> spark = get_or_create_spark_session()
    >>> df = spark.createDataFrame([(2000.0, 11), (1000.0, 12),
    ...                             (3000.0, 20)], schema=("foo", "bar"))
    >>> minus_op = udf(lambda x, y: x - y, FloatType())
    >>> col_mapper = ColumnMapper(fun=minus_op,
    ...                           inputCols=["foo", "bar"],
    ...                           outputCol="foobar")
    >>> col_mapper.transform(df).show()
    +------+---+------+
    |   foo|bar|foobar|
    +------+---+------+
    |2000.0| 11|1989.0|
    |1000.0| 12| 988.0|
    |3000.0| 20|2980.0|
    +------+---+------+
    <BLANKLINE>
    """

    # Spark ML Param holding the mapping UDF; stored via the Params machinery
    # so it participates in copy()/extractParamMap() like built-in params.
    fun = Param(Params._dummy(), "fun", "user-defined function used to transform a record")

    def __init__(self, fun, inputCols, outputCol):
        """Initializes the transformer with its function and column mapping.

        Args:
            fun: ``UserDefinedFunction``
            inputCols: list of input column names, in the order of the parameters of ``fun``
            outputCol: output column name
        """
        super(ColumnMapper, self).__init__()

        self.setFun(fun)
        self.setInputCols(inputCols)
        self.setOutputCol(outputCol)

    def setFun(self, fun):
        """Sets the transformation function.

        Args:
            fun: a user-defined function used to transform a record

        Returns:
            ``self``
        """
        self._paramMap[self.fun] = fun
        return self

    def getFun(self):
        """
        Returns:
            a user-defined function used to transform a record
        """
        return self.getOrDefault(self.fun)

    def getOutputCols(self):
        """Exposes the single output column as a one-element list, for
        interface parity with multi-output transformers such as
        ``MultiColumnMapper``.

        Returns:
            the names of output columns
        """
        return [self.getOutputCol()]

    @property
    def _column_appending(self):
        # This transformer appends a column rather than replacing the dataframe.
        return True

    def _transform(self, dataset: DataFrame):
        """Applies the UDF to the input columns and appends the output column.

        Args:
            dataset: input dataframe containing all of ``inputCols``

        Returns:
            the dataframe with ``outputCol`` appended
        """
        input_selectors = [dataset[col] for col in self.getInputCols()]
        dataset = dataset.withColumn(self.getOutputCol(), self.getFun()(*input_selectors))
        return dataset
