#! /usr/bin/env python3
import math

import functools
import operator
import pandas
from pyspark import Row
from pyspark.sql import DataFrame, functions
from pyspark.sql.functions import PandasUDFType, pandas_udf
from typing import List, Callable, Optional

from gai.v2.spark.feature.stats import neg_x_log
from gai.v2.utils import get_or_create_spark_session


def prepare_test_dataframe():
    """Builds a tiny in-memory DataFrame for tests.

    Columns: age, name, app_list (comma-separated), category_list
    (``key:value`` pairs), random_label.
    """
    rows = [
        (7, 'Alice', 'app_1,app_3', "cate_0:3,cate_1:2", 0),
        (7, 'Bob', 'app_3,app_21', "cate_0:4,cate_10:7", 1),
        (3, 'Claire', 'app_1,app_7', "cate_0:3,cate_11:4", 0),
        (7, 'Dan', 'app_9,app_5', "cate_20:2,cate_99:7", 0),
    ]
    columns = ["age", "name", "app_list", "category_list", "random_label"]
    return get_or_create_spark_session().createDataFrame(rows, schema=columns)


def prepare_test_dataframe_dup_name():
    """Builds a tiny in-memory test DataFrame with an extra ``install_pkgs`` column.

    Columns: age, name, app_list, category_list, label, install_pkgs
    (``#``-separated, possibly empty).
    """
    rows = [
        (7, 'Alice', 'app_1,app_3', "cate_0:3,cate_1:2", 0, "app_1"),
        (7, 'Bob', 'app_3,app_21', "cate_0:4,cate_10:7", 1, "app_4#app_21"),
        (3, 'Claire', 'app_1,app_7', "cate_0:3,cate_11:4", 0, ""),
        (7, 'Dan', 'app_9,app_5', "cate_20:2,cate_99:7", 0, ""),
    ]
    columns = ["age", "name", "app_list", "category_list", "label", "install_pkgs"]
    return get_or_create_spark_session().createDataFrame(rows, schema=columns)


def _merge_transform(left: list, right: list, cmp, fun_match, fun_left, fun_right):
    """Merges two sorted lists, mapping every element through a transform.

    Pre: ``left`` and ``right`` are each sorted, and their elements are
    mutually comparable through ``cmp``.

    Elements that ``cmp`` reports equal are combined via ``fun_match``;
    unmatched elements of ``left`` / ``right`` pass through ``fun_left`` /
    ``fun_right`` respectively. The output preserves the merged order and the
    whole pass is O(len(left) + len(right)).
    """
    merged = []
    i = j = 0
    left_len, right_len = len(left), len(right)

    # Classic two-pointer merge: advance whichever side holds the smaller key.
    while i < left_len and j < right_len:
        order = cmp(left[i], right[j])
        if order == 0:
            merged.append(fun_match(left[i], right[j]))
            i += 1
            j += 1
        elif order < 0:
            merged.append(fun_left(left[i]))
            i += 1
        else:
            merged.append(fun_right(right[j]))
            j += 1

    # Drain the remainder; at most one of these loops actually runs.
    while i < left_len:
        merged.append(fun_left(left[i]))
        i += 1
    while j < right_len:
        merged.append(fun_right(right[j]))
        j += 1

    return merged


def fill_missing(data: list, key_set: list, key_fun, generate_from_key):
    """Pads a sorted record list so its key set matches ``key_set`` exactly.

    Args:
        data: a record list such that ``key_fun(data[i])`` is a valid operation
        key_set: the whole key set, sorted
        key_fun: a function returning the key of an element of ``data``
        generate_from_key: a function producing a default record for a key

    Returns:
        a record list which is a superset of ``data`` whose keys equal
        ``key_set``; records absent from ``data`` are synthesized with
        ``generate_from_key``.
    """

    def compare(record, key):
        # Three-way comparison between a record (left side) and a bare key
        # (right side), as required by ``_merge_transform``.
        record_key = key_fun(record)
        if record_key == key:
            return 0
        return -1 if record_key < key else 1

    return _merge_transform(
        data, key_set, compare,
        lambda record, _key: record,   # key present in data: keep the record
        lambda record: record,         # record-only side never happens if data ⊆ key_set
        generate_from_key)             # key missing from data: synthesize a default


def parallelize_list(lst: list, col: str) -> DataFrame:
    """Wraps a plain Python list into a single-column Spark DataFrame.

    Args:
        lst: a list of elements
        col: the name of column

    Returns:
        a dataframe whose schema is ``(col,)``
    """

    session = get_or_create_spark_session()
    # Each element becomes a one-field row.
    rows = [(element,) for element in lst]
    return session.createDataFrame(rows, [col])


def compute_label_count(df: DataFrame, label_col: str, label_range: List[int]) -> DataFrame:
    """Counts rows per label value, including labels absent from ``df``.

    Args:
        df: a DataFrame which should not contain a column named 'count'
        label_col: the name of the label column
        label_range: the possible values that a label can take

    Returns:
        a DataFrame with columns ``label_col`` and ``count``. Labels from
        ``label_range`` that never occur in ``df`` appear with ``count=0``.
    """

    counted = df.groupBy(label_col).count()
    assert set(counted.columns) == {label_col, 'count'}
    all_labels = parallelize_list(label_range, label_col)
    assert set(all_labels.columns) == {label_col}
    # Left outer join from the full label list so unseen labels survive;
    # their null counts are then filled with zero.
    joined = all_labels.join(counted, on=label_col, how='left_outer')
    assert set(joined.columns) == {label_col, 'count'}
    return joined.na.fill(0.0)


def split_plain(s: Optional[str], sep: Optional[str] = None):
    """Splits the input string by the separator string.

    The input is stripped of surrounding whitespace first; ``None`` and
    blank/empty strings yield an empty list instead of ``['']``.

    Args:
        s: the input string, or None
        sep: the separator string; ``None`` means "split on runs of
            whitespace" (the default behavior of ``str.split``)

    Returns:
        the split result (possibly an empty list)
    """

    if s is None:
        return []

    assert isinstance(s, str)

    s = s.strip()

    # str.split(None) collapses whitespace runs; an explicit separator keeps
    # empty fields (e.g. "a,,b" -> ['a', '', 'b']).
    return s.split(sep) if s else []


def split_by_comma(s: str):
    """Splits the input string by comma.

    Args:
        s: the input string

    Returns:
        the split result (empty list for ``None`` or blank input)
    """

    return split_plain(s, sep=',')


def explode_list_plain(cell: str, sep: str = ","):
    """Expands a separator-joined feature list into ``(name, 1.0)`` pairs.

    Args:
        cell: a concatenated string of a list of elements separated by ``sep``
        sep: separation punctuation

    Returns:
        an expanded list of ``cell``, each entry paired with the value 1.0

    Examples:
        >>> explode_list_plain("f1,f3,f20,f43")
        [('f1', 1.0), ('f3', 1.0), ('f20', 1.0), ('f43', 1.0)]
    """
    feature_names = split_plain(cell, sep)
    return [(feature_name, 1.0) for feature_name in feature_names]


def explode_map_plain(cell: str, inter_field_sep: str = ',', intra_field_sep: str = ':', value_type=float):
    """Expands a serialized key-value map into ``(key, value)`` pairs.

    Args:
        cell: a string of a dictionary with punctuations ``inter_field_sep`` and ``intra_field_sep``
        inter_field_sep: the punctuation separating two fields
        intra_field_sep: the punctuation separating the key and the value within a field
        value_type: the type of the value in a field

    Returns:
        an expanded list of ``cell``

    Examples:

        >>> explode_map_plain("k1:2.0,k3:3.4,k9:0.7")
        [('k1', 2.0), ('k3', 3.4), ('k9', 0.7)]
    """

    def parse_field(field: str):
        # Each field must be exactly "key<intra_field_sep>value".
        pieces = split_plain(field, intra_field_sep)
        assert len(pieces) == 2
        key, raw_value = pieces
        return (key, value_type(raw_value))

    fields = split_plain(cell, inter_field_sep)
    return [parse_field(field) for field in fields]


def _make_nonzero_counter():
    """Creates a grouped-map pandas UDF summing the ``nnz``/``nz`` columns per group.

    Call this factory only after the SparkSession has been initialized.
    """
    @pandas_udf(returnType="feature string, nnz double, nz double", functionType=PandasUDFType.GROUPED_MAP)
    def count_nonzeros(pdf):  # Don't put type annotation here.
        total_nnz = pdf['nnz'].sum()
        total_nz = pdf['nz'].sum()
        # One output row per group; the group's feature name is constant,
        # so the first row's value represents the whole group.
        return pandas.DataFrame([[pdf['feature'].iloc[0], total_nnz, total_nz]])

    return count_nonzeros


def _compute_valuewise_count_without_default(df: DataFrame, label_col: str, composite_cols: List[str],
                                             exploders: List[Callable], simple_cols: List[str]) -> DataFrame:
    """Counts rows grouped by (feature, label, value), omitting zero-valued entries.

    Args:
        df: the dataframe to process
        label_col: the name of label column
        composite_cols: the names of composite columns. A composite column represents a group of features.
        exploders: the list of functions that map a composite cell to a list of ``(feature_name, feature_value)``.
            Feature names whose feature values are zero should be ignored.
        simple_cols: the names of simple columns. A simple column represents a single feature.

    Returns:
        a dataframe representing a set of (feature, label, value, count) tuples, the schema of which
        is ('feature', label_col, 'value', 'count').
        Rows whose ``value`` are zero are ignored — hence "without default": the
        zero-valued (default) groups are absent and must be reconstructed by the caller.
    """

    assert label_col in df.columns
    assert set(composite_cols).issubset(df.columns)
    assert set(simple_cols).issubset(df.columns)
    assert len(composite_cols) == len(exploders)

    def from_simple(row: Row, simple_col_name: str, label_col: str):
        # One ((feature, label, value), 1) pair per simple cell; the trailing 1
        # is a unit count to be accumulated later by reduceByKey.
        cell_value = float(row[simple_col_name])
        return (
            (simple_col_name, row[label_col], cell_value),
            1)

    def explode(row: Row) -> list:
        # Expands one input row into ((feature, label, value), 1) pairs,
        # skipping zero values for both simple and composite columns.
        ret = []
        for name in simple_cols:
            if row[name] != 0:
                ret.append(from_simple(row, name, label_col))
        for index, name in enumerate(composite_cols):
            compo_explode = exploders[index]
            ell = compo_explode(row[name])  # return [(feature0, value0), (feature1, value1), ...]
            ell = [((x[0], row[label_col], x[1]), 1) for x in ell if x[1] != 0]
            ret.extend(ell)
        return ret

    def to_row(tup: tuple) -> Row:
        # ``tup`` is ((feature, label, value), count) after reduceByKey.
        Prototype = Row("feature", label_col, "value", "count")
        return Prototype(tup[0][0], tup[0][1], tup[0][2], tup[1])

    result = df.rdd.flatMap(explode) \
        .reduceByKey(operator.add) \
        .map(to_row) \
        .toDF()
    return result


def _compute_valuewise_count(df: DataFrame, label_col: str, label_range: List[int], composite_cols: list,
                             exploders: List[Callable], simple_cols: List[str]) -> DataFrame:
    """Counts rows per (feature, label, value), including the implicit zero-value groups.

    Args:
        df: the dataframe
        label_col: the name of label column
        label_range: the range of label
        composite_cols: the names of composite columns. A composite column represents a group of features.
        exploders: the list of functions that map a composite cell to a list of ``(feature_name, feature_value)``.
            Feature names whose feature values are zero should be ignored.
        simple_cols: the names of simple columns. A simple column represents a single feature.

    Returns:
        a dataframe representing a set of (feature, label, value, count) tuples.
        The ``count`` is counted over the same (feature, label, value). Unlike
        :func:`_compute_valuewise_count_without_default`, the ``value == 0`` groups
        are reconstructed here (per-label total minus nonzero count); groups whose
        final count is zero are dropped.
    """

    assert label_col in df.columns
    assert set(composite_cols).issubset(df.columns)
    assert set(simple_cols).issubset(df.columns)
    assert len(composite_cols) == len(exploders)

    valuewise_count = _compute_valuewise_count_without_default(df, label_col, composite_cols, exploders, simple_cols)

    def to_key_value_tuple(row: Row) -> tuple:
        # Re-key by (feature, label) so counts can be summed over all values.
        return ((row['feature'], row[label_col]), row['count'])

    def to_row(tup: tuple) -> Row:
        Prototype = Row("feature", label_col, "rest_count")
        return Prototype(tup[0][0], tup[0][1], tup[1])

    # Schema(count_of_nonzero) = (feature, label, rest_count), where ``rest_count`` is the total
    # count of rows under the same feature and label whose values are nonzero
    count_of_nonzero = valuewise_count.rdd.map(to_key_value_tuple) \
        .reduceByKey(operator.add) \
        .map(to_row) \
        .toDF()

    feature = count_of_nonzero.select("feature").distinct()

    # Schema(label_count) = (label, count)
    label_count = compute_label_count(df, label_col, label_range)
    # Materialize the small label-count table once before the cross join reuses it.
    label_count.cache()
    label_count.count()
    label_feature_count = feature.crossJoin(label_count)

    # Outer join so that (feature, label) pairs with no nonzero rows still appear;
    # their missing ``rest_count`` is then filled with zero.
    temp = label_feature_count.withColumnRenamed("count", "total") \
        .join(count_of_nonzero, on=[label_col, 'feature'], how="outer") \
        .na.fill(0.0)

    # count(value == 0) = total rows for this (feature, label) minus rows with nonzero value.
    count_of_zero = temp.withColumn("count", temp["total"] - temp["rest_count"]) \
        .drop("total") \
        .drop("rest_count") \
        .withColumn("value", functions.lit(0.0))

    assert set(count_of_zero.columns) == {"feature", label_col, "value", "count"}

    result = valuewise_count.unionByName(count_of_zero)
    result = result.filter(result['count'] != 0)
    result = result.orderBy(["feature", label_col])
    return result


def _make_info_value_calculator_for_categorical_variable_binary_class(label_col: str):
    """Builds a grouped-map pandas UDF computing each feature's Information Value (IV)."""
    # Call this function only after SparkSession has been initialized

    # Note that WOE and thus IV are generally not defined for multiclass data.
    def adjusted(value):
        # Half-count (Laplace-style) adjustment applied when a group-wise count is zero.
        return value + 0.5

    def adjusted_compo_iv(event_count, nonevent_count, event_total, nonevent_total):
        # Group-wise IV contribution. Mirrors the module-level ``adjusted_iv``
        # except that vanishing totals yield 0.0 here rather than NaN.
        if event_total == 0 or nonevent_total == 0:
            return 0.0
        else:
            if event_count == 0 or nonevent_count == 0:
                # Adjust both percentages so the WOE log term stays finite.
                adj_percent_event = adjusted(event_count) / event_total
                adj_percent_nonevent = adjusted(nonevent_count) / nonevent_total
            else:
                adj_percent_event = event_count / event_total
                adj_percent_nonevent = nonevent_count / nonevent_total

            # The unadjusted percentage difference multiplies the (possibly
            # adjusted) WOE log term.
            percent_event = event_count / event_total
            percent_nonevent = nonevent_count / nonevent_total
            return (percent_nonevent - percent_event) * math.log(adj_percent_nonevent / adj_percent_event)

    @pandas_udf(returnType="feature string, info_value double", functionType=PandasUDFType.GROUPED_MAP)
    def compute_info_value(pdf):  # Don't put type annotation here.
        # We follow the definition in the following page.
        # https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html
        # Note that we use adjustedWOE instead of WOE to accommodate for vanishing group-wise
        # event number and vanishing group-wise non-event number.
        assert isinstance(pdf, pandas.DataFrame)
        assert {'feature', label_col, 'value', 'count'}.issubset(pdf.columns)

        # Rows with label == 1 are "events"; everything else counts as "non-event".
        events = pandas.DataFrame(pdf[pdf[label_col] == 1])
        nonevents = pandas.DataFrame(pdf[pdf[label_col] != 1])
        event_total = events['count'].sum()
        nonevent_total = nonevents['count'].sum()
        df_event = events.rename({'count': 'event_count'}, axis='columns').drop(label_col, axis='columns')
        df_nonevent = nonevents.rename({'count': 'nonevent_count'}, axis='columns').drop(label_col, axis='columns')
        # Outer merge keeps feature values observed on only one side; the
        # missing side's count becomes 0 after fillna.
        df_merge = pandas.merge(df_event, df_nonevent, how='outer', on=['feature', 'value'])
        df_merge = df_merge.fillna(0.0)
        df_merge['compo_iv'] = df_merge.apply(
            lambda row: adjusted_compo_iv(row['event_count'], row['nonevent_count'], event_total, nonevent_total),
            axis='columns')

        # A feature's IV is the sum of its group-wise contributions.
        info_value = df_merge['compo_iv'].sum()
        feature = pdf['feature'].iloc[0]
        return pandas.DataFrame([[feature, info_value]])

    return compute_info_value


def adjusted_woe(event_count, nonevent_count, event_total, nonevent_total):
    """Computes the (adjusted) WOE of a particular group.

    Args:
        event_count: the count of events within a particular group
        nonevent_count: the count of non-events within the same group as above
        event_total: the count of events over all groups
        nonevent_total: the count of non-events over all groups

    Returns:
        WOE of the particular group, or adjusted WOE if either ``event_count`` or ``nonevent_count`` is zero.
        The result is undefined if either ``event_total`` or ``nonevent_total`` is zero
        (0.0 is returned in that case).

    See Also:
        https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html

    """

    if event_total == 0 or nonevent_total == 0:
        return 0.0

    # Half-count (Laplace-style) adjustment of both counts keeps the log term
    # finite whenever either group-wise count vanishes.
    if event_count == 0 or nonevent_count == 0:
        event_count += 0.5
        nonevent_count += 0.5

    pct_event = event_count / event_total
    pct_nonevent = nonevent_count / nonevent_total
    return math.log(pct_nonevent / pct_event)


def adjusted_iv(event_count, nonevent_count, event_total, nonevent_total):
    """Computes the information value of a particular group.

    Args:
        event_count: the count of events within a particular group
        nonevent_count: the count of non-events within the same group as above
        event_total: the count of events over all groups
        nonevent_total: the count of non-events over all groups

    Returns:
        the information value of the particular group where the WOE is computed
        with :func:`adjusted_woe`.
        The result is undefined if either ``event_total`` or ``nonevent_total`` is zero.

    See Also:
        https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html
    """

    # Delegate the full computation and project out the IV component.
    return adjusted_woe_and_iv(event_count, nonevent_count, event_total, nonevent_total)['iv']


def adjusted_woe_and_iv(event_count, nonevent_count, event_total, nonevent_total):
    """Computes the WOE and the information value of a particular group.

    Args:
        event_count: the count of events within a particular group
        nonevent_count: the count of non-events within the same group as above
        event_total: the count of events over all groups
        nonevent_total: the count of non-events over all groups

    Returns:
        a dictionary whose key set is ``{'pct_nonevents', 'pct_events', 'woe', 'iv'}``.
        The WOE is computed with :func:`adjusted_woe`.
        The result is undefined (all NaN) if either ``event_total`` or
        ``nonevent_total`` is zero.

    See Also:
        https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html
    """

    if event_total == 0 or nonevent_total == 0:
        # Degenerate totals: every quantity is undefined.
        nan = float('nan')
        return {'pct_nonevents': nan,
                'pct_events'   : nan,
                'woe'          : nan,
                'iv'           : nan}

    pct_events = event_count / event_total
    pct_nonevents = nonevent_count / nonevent_total
    woe = adjusted_woe(event_count, nonevent_count, event_total, nonevent_total)
    return {'pct_nonevents': pct_nonevents,
            'pct_events'   : pct_events,
            'woe'          : woe,
            'iv'           : (pct_nonevents - pct_events) * woe}


# def adjusted_woe_and_iv_in_pd(event_count, nonevent_count, event_total, nonevent_total):
#     return pandas.Series(adjusted_woe_and_iv(event_count, nonevent_count, event_total, nonevent_total))


# def _make_groupwise_woe_and_iv_calculator_for_categorical_variable_binary_class(label_col: str):
#     @pandas_udf(
#         returnType="feature string, value double, nonevents double, events double, "
#                    "pct_nonevents double, pct_events double, woe double, iv double",
#         functionType=PandasUDFType.GROUPED_MAP)
#     def compute_groupwise_woe_and_iv(pdf):
#         events = pandas.DataFrame(pdf[pdf[label_col] == 1])
#         nonevents = pandas.DataFrame(pdf[pdf[label_col] != 1])
#         event_total = events['count'].sum()
#         nonevent_total = nonevents['count'].sum()
#         df_nonevent = nonevents.rename({'count': 'nonevents'}, axis='columns').drop(label_col, axis='columns')
#         df_event = events.rename({'count': 'events'}, axis='columns').drop(label_col, axis='columns')
#         df_merge = pandas.merge(df_nonevent, df_event, how='outer', on=['feature', 'value'])
#         df_merge = df_merge.fillna(0.0)
#         df_merge = df_merge.merge(
#             df_merge.apply(
#                 lambda row: adjusted_woe_and_iv_in_pd(row['events'], row['nonevents'], event_total, nonevent_total),
#                 axis='columns'),
#             left_index=True, right_index=True)
#         return df_merge
#
#     return compute_groupwise_woe_and_iv


def _make_chi_sq_calculator_for_categorical_variable_multiclass(label_col: str):
    """Builds a grouped-map pandas UDF computing the chi-square statistic per feature."""
    @pandas_udf(returnType="feature string, chi_sq double", functionType=PandasUDFType.GROUPED_MAP)
    def compute_chi_sq(pdf):
        # We follow the definition in the following page.
        # https://www.investopedia.com/terms/c/chi-square-statistic.asp

        assert isinstance(pdf, pandas.DataFrame)
        assert {'feature', label_col, 'value', 'count'}.issubset(pdf.columns)

        # Contingency table: rows are labels, columns are feature values,
        # cells are counts (missing combinations become 0).
        pivoted = pdf.pivot(index=label_col, columns='value', values='count')
        pivoted = pivoted.fillna(0.0)

        row_sum = pivoted.sum(axis='columns')
        col_sum = pivoted.sum(axis='index')

        # A zero marginal makes an expected cell count zero (division by zero
        # below), so the statistic is reported as null in that case.
        if any([x == 0 for x in row_sum]) or any([x == 0 for x in col_sum]):
            chi_sq = None
        else:
            observed = []
            expected = []

            total = pdf['count'].sum()
            for r in row_sum.index:
                for c in col_sum.index:
                    observed.append(pivoted.at[r, c])
                    # Expected count under independence: row marginal * column marginal / total.
                    expected.append(row_sum[r] * col_sum[c] / total)

            chi_sq = sum([(o - e) * (o - e) / e for o, e in zip(observed, expected)])

        # The group's feature name is constant; the first row represents the group.
        feature = pdf['feature'].iloc[0]
        return pandas.DataFrame([[feature, chi_sq]])

    return compute_chi_sq


def _make_info_gain_calculator_for_categorical_variable_multiclass(label_col: str):
    """Builds a grouped-map pandas UDF computing the information gain per feature."""
    @pandas_udf(returnType="feature string, info_gain double", functionType=PandasUDFType.GROUPED_MAP)
    def compute_info_gain(pdf):
        # We follow the definition in the following page.
        # https://en.wikipedia.org/wiki/Information_gain_in_decision_trees
        assert isinstance(pdf, pandas.DataFrame)
        assert {'feature', label_col, 'value', 'count'}.issubset(pdf.columns)

        # Contingency table: rows are labels, columns are feature values,
        # cells are counts (missing combinations become 0).
        pivoted = pdf.pivot(index=label_col, columns='value', values='count')
        pivoted = pivoted.fillna(0.0)

        # Entropy of the label distribution before splitting on the feature.
        # NOTE(review): ``neg_x_log`` presumably computes -x*log(x) — confirm
        # against gai.v2.spark.feature.stats.
        row_sum = pivoted.sum(axis='columns')
        total = row_sum.sum()
        parent_entropy = (row_sum / total).apply(neg_x_log).sum()

        # Entropy of each child (one per feature value), weighted by the
        # fraction of rows falling into that child.
        col_sum = pivoted.sum(axis='index')
        col_weight = col_sum / total

        intra_col_prob = (pivoted / col_sum)
        child_entropy = intra_col_prob.applymap(neg_x_log).sum(axis='index')
        weighted_child_entropy = (col_weight * child_entropy).sum()

        info_gain = parent_entropy - weighted_child_entropy

        # The group's feature name is constant; the first row represents the group.
        feature = pdf['feature'].iloc[0]
        return pandas.DataFrame([[feature, info_gain]])

    return compute_info_gain


def _compute_scores_for_binary_variable_binary_class(df: DataFrame, label_col: str, label_range: List[int],
                                                     composite_cols: List[str], exploders: List[Callable],
                                                     simple_cols: List[str]):
    """Scores binary-valued features by delegating to the categorical scorer.

    A binary variable is a special case of a categorical one, so the
    categorical binary-class computation applies unchanged.
    """
    return _compute_scores_for_categorical_variable_binary_class(
        df, label_col, label_range, composite_cols, exploders, simple_cols)


# def _compute_groupwise_woe_and_iv_for_categorical_variable_binary_class(df: DataFrame, label_col: str,
#                                                                         label_range: List[int],
#                                                                         composite_cols: List[str],
#                                                                         exploders: List[Callable],
#                                                                         simple_cols: List[str]):
#     assert set(label_range) == {-1, 1} or set(label_range) == {0, 1}
#     valuewise_count = _compute_valuewise_count(df, label_col, label_range, composite_cols, exploders, simple_cols)
#
#     woe_and_iv = valuewise_count.groupBy('feature').apply(
#         _make_groupwise_woe_and_iv_calculator_for_categorical_variable_binary_class(label_col))
#
#     return woe_and_iv


def _compute_scores_for_categorical_variable_binary_class(df: DataFrame, label_col: str, label_range: List[int],
                                                          composite_cols: List[str], exploders: List[Callable],
                                                          simple_cols: List[str]):
    """Computes binary-class feature scores: information value, information gain, chi-square.

    Requires a binary label range ({-1, 1} or {0, 1}); one output row per feature.
    """
    assert set(label_range) == {-1, 1} or set(label_range) == {0, 1}

    valuewise_count = _compute_valuewise_count(df, label_col, label_range, composite_cols, exploders, simple_cols)

    calculators = [
        _make_info_value_calculator_for_categorical_variable_binary_class(label_col),
        _make_info_gain_calculator_for_categorical_variable_multiclass(label_col),
        _make_chi_sq_calculator_for_categorical_variable_multiclass(label_col),
    ]
    score_frames = [valuewise_count.groupBy('feature').apply(calc) for calc in calculators]

    # Combine all per-feature scores into one row per feature.
    combined = score_frames[0]
    for frame in score_frames[1:]:
        combined = combined.join(frame, on='feature', how='inner')
    return combined


def _compute_scores_for_categorical_variable_multiclass(df: DataFrame, label_col: str, label_range: List[int],
                                                        composite_cols: List[str], exploders: List[Callable],
                                                        simple_cols: List[str]):
    """Computes multiclass-compatible feature scores: information gain and chi-square.

    One output row per feature.
    """
    valuewise_count = _compute_valuewise_count(df, label_col, label_range, composite_cols, exploders, simple_cols)

    info_gain = valuewise_count.groupBy('feature').apply(
        _make_info_gain_calculator_for_categorical_variable_multiclass(label_col))
    chi_sq = valuewise_count.groupBy('feature').apply(
        _make_chi_sq_calculator_for_categorical_variable_multiclass(label_col))

    return info_gain.join(chi_sq, on='feature', how='inner')
