import math
from typing import List, Callable

from pyspark import Row
from pyspark.sql import DataFrame
from pyspark.sql.types import StructType, StructField, StringType, DoubleType

from gai.v2.spark.base import SparkTransformer
from gai.v2.spark.feature.variable_stats import explode_list_plain, explode_map_plain, prepare_test_dataframe, \
    adjusted_woe_and_iv
from gai.v2.utils import get_or_create_spark_session


def make_map2expand(label_col: str, composite_cols: List[str], exploders: List[Callable], simple_cols: List[str]):
    """Builds a ``mapPartitions`` function that expands each input row into
    ``((feature, value), stats)`` pairs.

    Each yielded ``stats`` dict carries ``feature``, ``value`` and one unit of
    ``events``/``nonevents`` mass depending on whether the row's label equals 1.

    Args:
        label_col: name of the binary label column
        composite_cols: names of columns whose cells encode many features
        exploders: callables that turn a composite cell into ``(name, value)``
            pairs; order must match ``composite_cols``
        simple_cols: names of columns that each hold a single numeric feature

    Returns:
        a generator function suitable for ``rdd.mapPartitions``

    Raises:
        ValueError: if ``composite_cols`` and ``exploders`` differ in length
    """
    # Validate once up front instead of re-asserting per row (the original
    # per-row assert was also silently stripped under ``python -O``).
    if len(composite_cols) != len(exploders):
        raise ValueError('composite_cols and exploders must have the same length')

    def map2expand(rows):
        for row in rows:
            # One unit of event/nonevent mass for this row, copied into every
            # (feature, value) record the row produces.
            prototype = {'value': 0.0}
            if row[label_col] == 1:
                prototype['nonevents'] = 0.0
                prototype['events'] = 1.0
            else:
                prototype['nonevents'] = 1.0
                prototype['events'] = 0.0

            for simple_col in simple_cols:
                result = prototype.copy()
                result['feature'] = simple_col
                result['value'] = float(row[simple_col])
                yield ((result['feature'], result['value']), result)

            for composite_col, exploder in zip(composite_cols, exploders):
                parsed_features = exploder(row[composite_col])

                # one feature only gets one value for an entity
                assert len(parsed_features) == len({tag[0] for tag in parsed_features})

                # ``tag`` (was ``tuple``, which shadowed the builtin) is a
                # (feature_name, feature_value) pair from the exploder.
                for tag in parsed_features:
                    result = prototype.copy()
                    result['feature'] = tag[0]
                    result['value'] = float(tag[1])
                    if result['value'] == 0:
                        # Zero-valued entries act as placeholders only; their
                        # event mass is recomputed downstream (add_zero_line)
                        # and (0, 0) rows are later dropped (delete_zero_zero).
                        result['nonevents'] = 0.0
                        result['events'] = 0.0

                    yield ((result['feature'], result['value']), result)

    return map2expand


def make_add_zero_line(total_num, pos_num, simple_cols):
    """Builds a ``mapPartitions`` function that synthesizes the ``value == 0``
    row for every composite-column feature.

    For a composite feature, entities without the feature are not represented
    in the exploded data; the zero line receives all the event/nonevent mass
    not already attributed to nonzero values of that feature.

    Args:
        total_num: total number of records in the dataset
        pos_num: number of records with a positive label
        simple_cols: names of simple columns, which are excluded because every
            entity already contributes an explicit value for them

    Returns:
        a generator function suitable for ``rdd.mapPartitions``
    """
    # Hoisted out of the per-row loop: the original rebuilt set(simple_cols)
    # for every single row.
    simple_col_set = set(simple_cols)

    def add_zero_line(rows):
        for row in rows:
            if row[1]['feature'] not in simple_col_set:  # in composite columns
                row[1]['value'] = 0.0
                # Remaining mass = totals minus what nonzero values consumed.
                row[1]['nonevents'] = total_num - pos_num - row[1]['nonevents']
                row[1]['events'] = pos_num - row[1]['events']
                assert row[1]['nonevents'] >= 0 and row[1]['events'] >= 0
                yield row

    return add_zero_line


def add_up_events(x: dict, y: dict):
    """Merges two per-(feature, value) stats dicts by summing their tallies.

    All other fields (``feature``, ``value``, ...) are taken from ``x``;
    neither input is mutated.
    """
    merged = dict(x)
    merged['events'] += y['events']
    merged['nonevents'] += y['nonevents']
    return merged


def delete_zero_zero(rows):
    """Yields only the rows whose stats dict carries some event mass.

    Rows with both ``nonevents == 0`` and ``events == 0`` (the placeholder
    lines produced upstream) are dropped.
    """
    for keyed_row in rows:
        stats = keyed_row[1]
        if stats['nonevents'] != 0 or stats['events'] != 0:
            yield keyed_row


def compute_group_woe_iv(df: DataFrame,
                         label_col: str,
                         label_range: List[int],
                         composite_cols: List[str],
                         exploders: List[Callable],
                         simple_cols: List[str]) -> DataFrame:
    """Computes per-(feature, value) WOE and IV statistics over ``df``.

    Args:
        df: input dataset containing the label, composite and simple columns
        label_col: name of the binary label column
        label_range: allowed label values; must be {-1, 1} or {0, 1}
        composite_cols: columns whose cells encode collections of features
        exploders: callables that disassemble each composite cell into
            (feature, value) pairs, in the same order as ``composite_cols``
        simple_cols: columns that each hold a single numeric feature

    Returns:
        a DataFrame with columns feature/value/nonevents/events/
        pct_nonevents/pct_events/woe/iv
    """
    def make_calculate_woe_iv(event_total, nonevent_total):
        # Closure so the dataset-wide totals are captured once and shipped to
        # the executors with the map function.
        def calculate_woe_iv(row: Row):
            row_dict = row[1]
            # adjusted_woe_and_iv handles the degenerate (zero-count) cases.
            res = adjusted_woe_and_iv(row_dict['events'], row_dict['nonevents'], event_total, nonevent_total)
            row_dict['pct_events'] = res['pct_events']
            row_dict['pct_nonevents'] = res['pct_nonevents']
            row_dict['woe'] = res['woe']
            row_dict['iv'] = res['iv']
            return Row(**row_dict)

        return calculate_woe_iv

    # label_range is used only for validation; positives are detected below
    # via ``> 0``, which works for both {-1, 1} and {0, 1} encodings.
    assert set(label_range) == {-1, 1} or set(label_range) == {0, 1}
    assert label_col in df.columns
    assert set(composite_cols).issubset(df.columns)
    assert set(simple_cols).issubset(df.columns)
    spark = get_or_create_spark_session()
    schema = StructType([StructField('feature', StringType(), True),
                         StructField('value', DoubleType(), True),
                         StructField('nonevents', DoubleType(), True),
                         StructField('events', DoubleType(), True),
                         StructField('pct_nonevents', DoubleType(), True),
                         StructField('pct_events', DoubleType(), True),
                         StructField('woe', DoubleType(), True),
                         StructField('iv', DoubleType(), True), ])

    num = df.count()
    pos_num = df.filter(df[label_col] > 0).count()
    # Stage 1: expand every record into ((feature, value), unit-count) pairs.
    map2expand = make_map2expand(label_col, composite_cols, exploders, simple_cols)
    rdd = df.rdd.mapPartitions(map2expand)

    # Stage 2: sum the unit counts per (feature, value), then re-key by
    # feature only for the per-feature aggregation below.
    rdd = rdd.reduceByKey(add_up_events)
    rdd = rdd.map(lambda row: (row[1]['feature'], row[1]))

    # Stage 3: for composite features, synthesize the value == 0 line whose
    # counts are the dataset totals minus the mass seen at nonzero values.
    add_zero_line = make_add_zero_line(num, pos_num, simple_cols)
    temp_rdd = rdd.reduceByKey(add_up_events).mapPartitions(add_zero_line)

    # Stage 4: merge the synthesized zero lines back in and drop the (0, 0)
    # placeholder rows emitted by map2expand.
    rdd = rdd.union(temp_rdd)
    rdd = rdd.mapPartitions(delete_zero_zero)

    # Stage 5: turn raw counts into WOE/IV; pos_num is the event total and
    # num - pos_num the nonevent total.
    calculate_woe_iv = make_calculate_woe_iv(pos_num, num - pos_num)

    rdd = rdd.map(calculate_woe_iv)
    result = spark.createDataFrame(rdd, schema)

    return result


class GroupwiseWoeIvCalculator(SparkTransformer):
    """Computes WOE and groupwise information value.

    Args:
        labelCol: the name of label column
        labelRange: the collection of values that a label can take
        compoCols: the list of names of composite columns. A composite column represents a collection of features.
        compoExploders: the list of callables that disassemble each cell of composite columns into lists. The order of
            exploders should match that of ``compoCols``.
        simpleCols: the list of names of simple columns. A simple column represents a single feature.

    In the case of degenerate distribution,
    `adjusted WOE <https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html>`_
    is used instead of the canonical definition.

    See Also:
        https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html

    >>> df = prepare_test_dataframe()
    >>> df.show()
    +---+------+------------+-------------------+------------+
    |age|  name|    app_list|      category_list|random_label|
    +---+------+------------+-------------------+------------+
    |  7| Alice| app_1,app_3|  cate_0:3,cate_1:2|           0|
    |  7|   Bob|app_3,app_21| cate_0:4,cate_10:7|           1|
    |  3|Claire| app_1,app_7| cate_0:3,cate_11:4|           0|
    |  7|   Dan| app_9,app_5|cate_20:2,cate_99:7|           0|
    +---+------+------------+-------------------+------------+
    <BLANKLINE>
    >>> calc = GroupwiseWoeIvCalculator(labelCol='random_label', labelRange=[0,1],
    ...     compoCols=['app_list', 'category_list'],
    ...     compoExploders=[explode_list_plain, explode_map_plain],
    ...     simpleCols=['age'])
    >>> result = calc.transform(df)
    >>> result.printSchema()
    root
     |-- feature: string (nullable = true)
     |-- value: double (nullable = true)
     |-- nonevents: double (nullable = true)
     |-- events: double (nullable = true)
     |-- pct_nonevents: double (nullable = true)
     |-- pct_events: double (nullable = true)
     |-- woe: double (nullable = true)
     |-- iv: double (nullable = true)
    <BLANKLINE>
    >>> result.orderBy(['feature', 'value'], ascending=[0,1]).show()
    +-------+-----+---------+------+------------------+----------+--------------------+-------------------+
    |feature|value|nonevents|events|     pct_nonevents|pct_events|                 woe|                 iv|
    +-------+-----+---------+------+------------------+----------+--------------------+-------------------+
    |cate_99|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    |cate_99|  7.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    |cate_20|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    |cate_20|  2.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    |cate_11|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    |cate_11|  4.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    |cate_10|  0.0|      3.0|   0.0|               1.0|       0.0|  0.8472978603872037| 0.8472978603872037|
    |cate_10|  7.0|      0.0|   1.0|               0.0|       1.0| -2.1972245773362196| 2.1972245773362196|
    | cate_1|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    | cate_1|  2.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    | cate_0|  0.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    | cate_0|  3.0|      2.0|   0.0|0.6666666666666666|       0.0|  0.5108256237659907| 0.3405504158439938|
    | cate_0|  4.0|      0.0|   1.0|               0.0|       1.0| -2.1972245773362196| 2.1972245773362196|
    |  app_9|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    |  app_9|  1.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    |  app_7|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    |  app_7|  1.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    |  app_5|  0.0|      2.0|   1.0|0.6666666666666666|       1.0|-0.40546510810816444|0.13515503603605483|
    |  app_5|  1.0|      1.0|   0.0|0.3333333333333333|       0.0|                 0.0|                0.0|
    |  app_3|  0.0|      2.0|   0.0|0.6666666666666666|       0.0|  0.5108256237659907| 0.3405504158439938|
    +-------+-----+---------+------+------------------+----------+--------------------+-------------------+
    only showing top 20 rows
    <BLANKLINE>


    The meaning of each field is listed below.

    - ``feature``: the name of a particular feature
    - ``value``: a particular value in the range of the feature
    - ``nonevents``: the number of non-events (records with negative labels) of the specified ``(feature, value)`` pair
    - ``events``: the number of events (records with positive labels) of the specified ``(feature, value)`` pair
    - ``pct_nonevents``: the percentage of non-events of the specified ``(feature, value)`` pair in the set of all non-events
    - ``pct_events``: the percentage of events of the specified ``(feature, value)`` pair in the set of all events
    - ``woe``: the WOE of the specified ``(feature, value)`` pair; adjusted WOE is used if the standard WOE is not well-defined
    - ``iv``: the information value of the specified ``(feature, value)`` pair
    """

    def __init__(self, labelCol: str, labelRange: List[int], compoCols: List[str], compoExploders: List[Callable],
                 simpleCols: List[str]):
        super().__init__()
        # Fluent setters: each returns self, so configuration chains.
        self.setLabelCol(labelCol) \
            .setLabelRange(labelRange) \
            .setCompoCols(compoCols) \
            .setCompoExploders(compoExploders) \
            .setSimpleCols(simpleCols)

    def setLabelCol(self, labelCol: str) -> 'GroupwiseWoeIvCalculator':
        """Sets the name of label column

        Args:
            labelCol: the name of label column

        Returns:
            ``self``
        """
        self._labelCol = labelCol
        return self

    def getLabelCol(self) -> str:
        """

        Returns:
            the name of label column
        """
        return self._labelCol

    def setLabelRange(self, labelRange: List[int]) -> 'GroupwiseWoeIvCalculator':
        """Sets the range of label value.

        Args:
            labelRange: the range of label value

        Returns:
            ``self``
        """
        self._labelRange = labelRange
        return self

    def getLabelRange(self) -> List[int]:
        """

        Returns:
            the range of label value
        """
        return self._labelRange

    def setCompoCols(self, compoCols: List[str]) -> 'GroupwiseWoeIvCalculator':
        """Sets the names of composite columns.

        Args:
            compoCols: the names of composite columns

        Returns:
            ``self``
        """
        self._compoCols = compoCols
        return self

    def getCompoCols(self) -> List[str]:
        """

        Returns:
            the names of composite columns
        """
        return self._compoCols

    def setCompoExploders(self, compoExploders: List[Callable]) -> 'GroupwiseWoeIvCalculator':
        """Sets the content exploders of composite columns.

        Args:
            compoExploders: the content exploders of composite columns, in an order
                corresponding to the return value of ``getCompoCols()``

        Returns:
            ``self``
        """
        self._compoExploders = compoExploders
        return self

    def getCompoExploders(self) -> List[Callable]:
        """

        Returns:
            the content exploders of composite columns
        """
        return self._compoExploders

    def setSimpleCols(self, simpleCols: List[str]) -> 'GroupwiseWoeIvCalculator':
        """Sets the names of simple columns.

        Args:
            simpleCols: the names of simple columns

        Returns:
            ``self``
        """
        self._simpleCols = simpleCols
        return self

    def getSimpleCols(self) -> List[str]:
        """

        Returns:
            the names of simple columns
        """
        return self._simpleCols

    def getInputCols(self) -> List[str]:
        """

        Returns:
            the names of input columns
        """
        return [self.getLabelCol()] + self.getCompoCols() + self.getSimpleCols()

    def getOutputCols(self) -> List[str]:
        """

        Returns:
            the names of newly generated columns
        """
        return ['feature', 'value', 'nonevents', 'events', 'pct_nonevents', 'pct_events', 'woe', 'iv']

    def _transform(self, dataset: DataFrame) -> DataFrame:
        # Delegates the actual computation to the module-level function; the
        # transformer only carries configuration.
        ret = compute_group_woe_iv(dataset,
                                   label_col=self.getLabelCol(),
                                   label_range=self.getLabelRange(),
                                   composite_cols=self.getCompoCols(),
                                   exploders=self.getCompoExploders(),
                                   simple_cols=self.getSimpleCols())
        return ret