from typing import Dict, Optional

from pyspark.sql import DataFrame

from gai.v2.spark.transformer import nullify_cells
from gai.v2.utils import get_or_create_spark_session
from gai.v2.spark.demo_data import demo_data_for_cell_nullifier


def summary_of(df: DataFrame, null_equiv: Dict, metrics: Optional[list] = None) -> DataFrame:
    """Computes statistics for each column. Supported statistics include count,
    mean, stddev, min, max, saturation, and percentiles.

    Args:
        df:
            the dataframe.

        null_equiv:
            the dictionary that specifies the values that are equivalent to null
            values. Each (key, value) pair specifies the column name and the
            values that are equivalent to null. The latter can be specified
            via a list (set, tuple, etc.) of values equivalent to null, or a
            unary predicate that returns ``True`` on values equivalent to null
            values. If no value is specified for a column, the default null
            value (namely, ``None``) is assumed.

        metrics:
            list of statistics, which can be any subset of
            ``["count", "mean", "stddev", "min", "max", "saturation"]``
            and arbitrary percentiles in the form like ``"24%"``.
            Default statistics are
            ``["count", "mean", "stddev", "min", "25%", "50%", "75%", "max", "saturation"]``.

    Returns:
        the summary.

    Raises:
        ValueError: if ``"saturation"`` is requested and ``df`` is empty
            (saturation is undefined for zero rows).


    >>> df = demo_data_for_cell_nullifier()
    >>> df.show()
    +------+--------+--------------------+-------------+--------------------+---------------+
    |secret|     day|                gid_|_month_offset|         ft_usertags|ft_category_cnt|
    +------+--------+--------------------+-------------+--------------------+---------------+
    |     1|20181114|ANDROID-68b6c301c...|         null|                    |               |
    |     1|20181114|ANDROID-68b6c301c...|           -1|                    |               |
    |     2|20181114|ANDROID-68b6c301c...|            0|                    |               |
    |     2|20181114|ANDROID-68b6c301c...|           -1|                    |               |
    |     3|20181114|                null|            0|                    |               |
    |     3|20181114|                null|           -1|                    |               |
    |     4|20181126|ANDROID-0000db9c0...|            0|                    |              3|
    |     4|20181126|ANDROID-0000db9c0...|           -1|                    |              3|
    |     5|20181126|ANDROID-846049d18...|            0|022000,026400,02d400|             12|
    |     5|20181126|ANDROID-846049d18...|           -1|022000,026400,02d400|             13|
    |     7|20181126|        NON-EXISTENT|            0|                    |               |
    |     7|20181126|        non-existent|           -1|                    |               |
    |     8|20180709|ANDROID-000340aa5...|            0|    02b000,02b100,h0|             14|
    |     8|20180709|ANDROID-000340aa5...|           -1|    02b000,02b100,h0|             15|
    |     9|20180709|ANDROID-00033a1c3...|            0|022000,022500,02d300|              7|
    |     9|20180709|ANDROID-00033a1c3...|           -1|022000,022100,02d300|              9|
    |    11|20180709|                null|            0|                    |               |
    |    11|20180709|                null|           -1|                    |               |
    +------+--------+--------------------+-------------+--------------------+---------------+
    <BLANKLINE>
    >>> summary = summary_of(df=df,
    ...                      null_equiv={'gid_'           : lambda x: x.casefold() == 'non-existent'.casefold(),
    ...                                  'ft_category_cnt': ['']})
    >>> summary.show()
    +----------+------------------+------------------+--------------------+-------------------+----------------+------------------+
    |   summary|            secret|               day|                gid_|      _month_offset|     ft_usertags|   ft_category_cnt|
    +----------+------------------+------------------+--------------------+-------------------+----------------+------------------+
    |       25%|                 3|       2.0180709E7|                null|                 -1|            null|               3.0|
    |       50%|                 5|       2.0181114E7|                null|                 -1|            null|               9.0|
    |       75%|                 8|       2.0181126E7|                null|                  0|            null|              13.0|
    |     count|                18|                18|                  12|                 17|              18|                 8|
    |       max|                11|          20181126|ANDROID-846049d18...|                  0|02b000,02b100,h0|                 9|
    |      mean| 5.555555555555555|       2.0180983E7|                null|-0.5294117647058824|            null|               9.5|
    |       min|                 1|          20180709|ANDROID-0000db9c0...|                 -1|                |                12|
    |saturation|               1.0|               1.0|  0.6666666666666666| 0.9444444444444444|             1.0|0.4444444444444444|
    |    stddev|3.2938842120837184|199.42800558638692|                null| 0.5144957554275265|            null| 4.780914437337574|
    +----------+------------------+------------------+--------------------+-------------------+----------------+------------------+
    <BLANKLINE>

    """

    metrics = metrics or ["count", "mean", "stddev", "min", "25%", "50%", "75%", "max", "saturation"]

    # "saturation" is not a native Spark summary statistic, so it is handled
    # separately below.  Deduplicate while preserving the caller's order
    # (iterating a set here would make the intermediate plan nondeterministic).
    nonsat_metrics = tuple(dict.fromkeys(m for m in metrics if m != "saturation"))

    # Replace null-equivalent cell values with real nulls so the built-in
    # statistics (count, mean, ...) skip them.
    nullified_df = nullify_cells(df, null_equiv, df.columns)
    numeric_summary = nullified_df.summary(*nonsat_metrics)

    total = df.count()
    # Per-column non-null counts after nullification; always needed because it
    # anchors the join below (and feeds the saturation computation).
    count_df = nullified_df.summary("count")

    if "saturation" in metrics:
        if total == 0:
            # Avoid a cryptic ZeroDivisionError for an empty dataframe.
            raise ValueError("cannot compute 'saturation' of an empty dataframe")
        # Saturation = fraction of cells per column that remain non-null
        # after null-equivalents were nullified.
        saturation = {'summary': 'saturation',
                      **{col: float(count_df.first()[col]) / total for col in df.columns}}
        spark = get_or_create_spark_session()
        # Reuse count_df's schema so the union below lines up column-for-column.
        saturation_df = spark.createDataFrame([saturation], count_df.schema)
        temp_df = saturation_df.union(count_df)
    else:
        temp_df = count_df

    # Outer join on all columns merges the saturation/count rows with the
    # numeric summary; identical "count" rows present on both sides collapse
    # into one.  Then drop statistics the caller did not request.
    result = temp_df.join(numeric_summary, on=numeric_summary.columns, how='outer')
    result = result.filter(result['summary'].isin(metrics))
    return result.sort(["summary"])
