import pyspark.sql.functions as F
from pyspark.sql import DataFrame


def safe_div(p, q):
    """Divide *p* by *q*, returning NaN instead of raising on a zero divisor.

    Args:
        p: the numerator
        q: the denominator

    Returns:
        ``p / q``, or ``float('nan')`` when ``q`` is zero
    """
    if q == 0:
        return float("nan")
    return p / q


def accuracy_of(df: DataFrame, label_col='label', prediction_col='prediction'):
    """Compute the accuracy of the prediction results (applicable to classification).

    Args:
        df: the input dataframe
        label_col: the name of the label column
        prediction_col: the name of the prediction column

    Returns:
        the accuracy, or NaN if division by zero happens (e.g. an empty dataframe)
    """
    # Per-row indicator: 1.0 when the prediction matches the label, else 0.0.
    correct = F.when(F.col(label_col) == F.col(prediction_col), 1.0).otherwise(0.0)
    # Sum with an explicit alias instead of recovering the value through
    # Spark's auto-generated "sum(_cnt)" column name, which is brittle.
    correct_cnt = df.agg(F.sum(correct).alias('_correct')).head()['_correct']
    total = df.count()
    # On an empty dataframe sum() yields None, but total is then 0, so
    # safe_div returns NaN before the None numerator is ever used.
    return safe_div(correct_cnt, total)


def quadruple_of(df: DataFrame, label_col='label', prediction_col='prediction', labels=(0.0, 1.0)):
    """Compute the number of true positives, false negatives, false positives,
    true negatives of the prediction results (applicable to binary classification).

    Args:
        df: the input dataframe
        label_col: the name of the label column
        prediction_col: the name of the prediction column
        labels: the values of negative label and positive label in order. Default is (0.0, 1.0).

    Returns:
        a dictionary with keys ``tp``, ``fn``, ``fp``, and ``tn``
    """
    neg, pos = labels[0], labels[1]

    def _indicator(label_val, pred_val):
        # 1.0 for rows whose (label, prediction) equals the given pair, else 0.0.
        return F.when(
            (F.col(label_col) == label_val) & (F.col(prediction_col) == pred_val),
            1.0).otherwise(0.0)

    # Alias each sum explicitly so the result keys are exactly 'tp'/'fn'/'fp'/'tn'.
    # The previous version recovered the keys from Spark's auto-generated
    # "sum(_tp)" column names via str.lstrip/str.rstrip, which strip *character
    # sets* (not prefixes/suffixes) and only produced correct keys by coincidence.
    row = df.agg(
        F.sum(_indicator(pos, pos)).alias('tp'),
        F.sum(_indicator(pos, neg)).alias('fn'),
        F.sum(_indicator(neg, pos)).alias('fp'),
        F.sum(_indicator(neg, neg)).alias('tn'),
    ).head()
    return row.asDict()


def metrics_of(df: DataFrame, label_col='label', prediction_col='prediction', labels=(0.0, 1.0)):
    """Compute the following statistics of the prediction results (applicable to binary classification):
        - accuracy,
        - precision,
        - recall, and
        - f1

    Args:
        df: the input dataframe
        label_col: the name of the label column
        prediction_col: the name of the prediction column
        labels: the values of negative label and positive label in order. Default is (0.0, 1.0).

    Returns:
        a dictionary with keys ``accuracy``, ``precision``, ``recall``, and ``f1``.
        For division by zero, NaN is placed in the corresponding field.
    """
    quadruple = quadruple_of(df, label_col, prediction_col, labels)
    tp, fn, fp, tn = quadruple['tp'], quadruple['fn'], quadruple['fp'], quadruple['tn']
    # The total is fully determined by the quadruple. The previous
    # `assert total == df.count()` forced a second full pass over the data
    # just for a sanity check, and would be stripped under `python -O` anyway.
    # NOTE(review): it also failed spuriously when the dataframe contained
    # label values outside `labels`; such rows are simply ignored here.
    total = tp + fn + fp + tn

    accuracy = safe_div((tp + tn), total)
    precision = safe_div(tp, (tp + fp))
    recall = safe_div(tp, (tp + fn))
    f1 = safe_div((2 * tp), (2 * tp + fp + fn))
    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1
    }
