#! /usr/bin/env python3
from typing import List

from pyspark.ml.feature import RegexTokenizer
from pyspark.ml.param import Param
from pyspark.ml.param.shared import HasInputCol, HasOutputCols, Params
from pyspark.sql import DataFrame, SparkSession

from gai.v2.spark.base import SparkTransformer
from gai.v2.utils import generate_unique_str


class StructuredColumnBreaker(SparkTransformer, HasInputCol, HasOutputCols):
    r"""Split a delimited string column into several new string columns.

    Given a regular expression, an input column name, and a list of output
    column names, an instance of ``StructuredColumnBreaker`` transforms a
    ``DataFrame`` into another by appending new columns, whose names are
    specified by ``outputCols``; the *i*-th output column holds the *i*-th
    field obtained by splitting the input column on ``pattern``.

    Args:
        inputCol: input column name
        outputCols: list of output column names, one per expected field
        pattern: splitting pattern (a regular expression — use e.g.
            ``r"\|"`` to split on a literal pipe character)

    >>> spark = SparkSession.builder.master("local").getOrCreate()
    >>> df = spark.createDataFrame([("Alice|King|D3xFtrBx|20180103", 11),
    ...                             ("Bob|Wolf|L1MTyWfP|20180507", 12),
    ...                             ("John||tpqowtQE|20180723", 20)],
    ...                            schema=("text", "age"))
    >>> df.show()
    +--------------------+---+
    |                text|age|
    +--------------------+---+
    |Alice|King|D3xFtr...| 11|
    |Bob|Wolf|L1MTyWfP...| 12|
    |John||tpqowtQE|20...| 20|
    +--------------------+---+
    <BLANKLINE>
    >>> col_breaker = StructuredColumnBreaker(inputCol="text",
    ...                     outputCols=["given_name", "sur_name", "id", "day"],
    ...                     pattern=r"\|")
    ... ## note that one should use regular expression for ``pattern``
    >>> df_res = col_breaker.transform(df)
    >>> df_res.show()
    +--------------------+---+----------+--------+--------+--------+
    |                text|age|given_name|sur_name|      id|     day|
    +--------------------+---+----------+--------+--------+--------+
    |Alice|King|D3xFtr...| 11|     Alice|    King|D3xFtrBx|20180103|
    |Bob|Wolf|L1MTyWfP...| 12|       Bob|    Wolf|L1MTyWfP|20180507|
    |John||tpqowtQE|20...| 20|      John|        |tpqowtQE|20180723|
    +--------------------+---+----------+--------+--------+--------+
    <BLANKLINE>
    >>> df_res.printSchema()
    root
     |-- text: string (nullable = true)
     |-- age: long (nullable = true)
     |-- given_name: string (nullable = true)
     |-- sur_name: string (nullable = true)
     |-- id: string (nullable = true)
     |-- day: string (nullable = true)
    <BLANKLINE>
    """

    # Param descriptor; ``Params._dummy()`` is the conventional placeholder
    # parent that pyspark rebinds to the owning instance.
    pattern = Param(Params._dummy(), "pattern", "regular expression used to split strings")

    def __init__(self, inputCol, outputCols, pattern=r"\|"):
        # NOTE: the default is a raw string — ``"\|"`` is an invalid escape
        # sequence (SyntaxWarning since Python 3.12); ``r"\|"`` has the same
        # runtime value, so callers are unaffected.
        super(StructuredColumnBreaker, self).__init__()
        self.setInputCol(inputCol)
        self.setOutputCols(list(outputCols))
        self.setPattern(pattern)

    def setPattern(self, pattern):
        """Sets the pattern.

        Args:
            pattern: pattern, that is, the regular expression used to split
                a field
        Returns:
            ``self``
        """
        # Go through the official Param API rather than poking _paramMap
        # directly, so copy()/extractParamMap() semantics stay consistent.
        self._set(pattern=pattern)
        return self

    def getPattern(self):
        """
        Returns:
            pattern, that is, the regular expression used to split
            a field
        """
        return self.getOrDefault(self.pattern)

    def getInputCols(self) -> List[str]:
        """
        Returns:
            the names of input columns (always a one-element list, since this
            transformer reads a single input column)
        """
        return [self.getInputCol()]

    @property
    def _column_appending(self):
        # This transformer only appends columns; existing ones are preserved.
        return True

    def _transform(self, dataset: DataFrame) -> DataFrame:
        """Split ``inputCol`` on ``pattern`` and append one column per entry
        of ``outputCols``.

        Args:
            dataset: input ``DataFrame``
        Returns:
            a new ``DataFrame`` with the split fields appended as string
            columns (original columns unchanged)
        """
        # Tokenize into a temporary array column whose name is guaranteed not
        # to collide with any existing or output column.
        tmp_col = generate_unique_str(dataset.columns + self.getOutputCols())
        # minTokenLength=0 keeps empty fields (e.g. "John||..." yields ""),
        # toLowercase=False preserves the original casing.
        tokenizer = RegexTokenizer(inputCol=self.getInputCol(),
                                   outputCol=tmp_col,
                                   minTokenLength=0,
                                   toLowercase=False,
                                   pattern=self.getPattern())
        dataset = tokenizer.transform(dataset)

        # Project each token out of the temporary array column by position.
        # NOTE(review): rows with fewer fields than outputCols presumably get
        # null in the trailing columns (getItem on an out-of-range index) —
        # confirm this is the intended behavior for ragged input.
        output_cols = self.getOutputCols()
        output_selectors = [(col, dataset[tmp_col].getItem(index)) for index, col in enumerate(output_cols)]
        for col, selector in output_selectors:
            dataset = dataset.withColumn(col, selector)
        dataset = dataset.drop(tmp_col)

        return dataset
