import matplotlib.pyplot as plt


def true_positive(y_true, y_pred):
    """
    Function to calculate True Positives
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: number of true positives
    """
    # a true positive is a sample that is actually 1 and predicted 1;
    # sum over a generator replaces the manual counter loop
    return sum(1 for yt, yp in zip(y_true, y_pred) if yt == 1 and yp == 1)


def true_negative(y_true, y_pred):
    """
    Function to calculate True Negatives
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: number of true negatives
    """
    # a pair counts only when both the target and the prediction are 0
    count = 0
    for actual, predicted in zip(y_true, y_pred):
        if actual == 0 and predicted == 0:
            count += 1
    return count


def false_positive(y_true, y_pred):
    """
    Function to calculate False Positives
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: number of false positives
    """
    # count samples predicted 1 whose true label is 0
    return sum(1 for actual, predicted in zip(y_true, y_pred)
               if actual == 0 and predicted == 1)


def false_negative(y_true, y_pred):
    """
    Function to calculate False Negatives
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: number of false negatives
    """
    # count samples predicted 0 whose true label is 1
    return sum(1 for actual, predicted in zip(y_true, y_pred)
               if actual == 1 and predicted == 0)


def recall(y_true, y_pred):
    """
    Function to calculate recall
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: recall score; 0.0 when there are no actual positive samples
    """
    tp = true_positive(y_true, y_pred)
    fn = false_negative(y_true, y_pred)
    denominator = tp + fn
    # guard against ZeroDivisionError when y_true contains no positives
    if denominator == 0:
        return 0.0
    return tp / denominator


def accuracy_v2(y_true, y_pred):
    """
    Function to calculate accuracy using tp/tn/fp/fn
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: accuracy score; 0.0 for empty inputs
    """
    tp = true_positive(y_true, y_pred)
    fp = false_positive(y_true, y_pred)
    fn = false_negative(y_true, y_pred)
    tn = true_negative(y_true, y_pred)
    total = tp + tn + fp + fn
    # guard against ZeroDivisionError when both input lists are empty
    if total == 0:
        return 0.0
    return (tp + tn) / total


def precision(y_true, y_pred):
    """
    Function to calculate precision
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: precision score; 0.0 when nothing is predicted positive
    """
    tp = true_positive(y_true, y_pred)
    fp = false_positive(y_true, y_pred)
    denominator = tp + fp
    # guard against ZeroDivisionError when no sample is predicted 1
    if denominator == 0:
        return 0.0
    # avoid shadowing the function name with a local variable
    return tp / denominator


def f1(y_true, y_pred):
    """
    Function to calculate f1 score
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: f1 score; 0.0 when precision and recall are both zero
    """
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    # guard against ZeroDivisionError when both precision and recall are 0
    if p + r == 0:
        return 0.0
    return 2 * p * r / (p + r)


def tpr(y_true, y_pred):
    """
    Function to calculate tpr
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: tpr/recall
    """
    # the true positive rate is identical to recall by definition
    rate = recall(y_true, y_pred)
    return rate


def fpr(y_true, y_pred):
    """
    Function to calculate fpr
    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: fpr; 0.0 when there are no actual negative samples
    """
    fp = false_positive(y_true, y_pred)
    tn = true_negative(y_true, y_pred)
    denominator = tn + fp
    # guard against ZeroDivisionError when y_true contains no negatives
    if denominator == 0:
        return 0.0
    return fp / denominator


if __name__ == '__main__':
    # ground-truth binary labels for the precision-recall demo
    y_true = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
              1, 0, 0, 0, 0, 0, 0, 0, 1, 0]
    # model scores: probability that each sample belongs to class 1
    y_pred = [0.02638412, 0.11114267, 0.31620708,
              0.0490937, 0.0191491, 0.17554844,
              0.15952202, 0.03819563, 0.11639273,
              0.079377, 0.08584789, 0.39095342,
              0.27259048, 0.03447096, 0.04644807,
              0.03543574, 0.18521942, 0.05934905,
              0.61977213, 0.33056815]
    precisions = []
    recalls = []
    # how we assumed these thresholds is a long story
    thresholds = [0.0490937, 0.05934905, 0.079377,
                  0.08584789, 0.11114267, 0.11639273,
                  0.15952202, 0.17554844, 0.18521942,
                  0.27259048, 0.31620708, 0.33056815,
                  0.39095342, 0.61977213]
    # binarize the scores at each threshold and record the
    # resulting precision/recall pair
    for threshold in thresholds:
        binarized = [int(score >= threshold) for score in y_pred]
        precisions.append(precision(y_true, binarized))
        recalls.append(recall(y_true, binarized))

    plt.figure(figsize=(7, 7))
    plt.plot(recalls, precisions)
    plt.xlabel('Recall', fontsize=15)
    plt.ylabel('Precision', fontsize=15)
    plt.show()

    # collected (fpr, tpr) points for the ROC curve
    tpr_list = []
    fpr_list = []
    # actual targets
    y_true = [0, 0, 0, 0, 1, 0, 1,
              0, 0, 1, 0, 1, 0, 0, 1]
    # predicted probabilities of a sample being 1
    y_pred = [0.1, 0.3, 0.2, 0.6, 0.8, 0.05,
              0.9, 0.5, 0.3, 0.66, 0.3, 0.2,
              0.85, 0.15, 0.99]
    # handmade thresholds
    thresholds = [0, 0.1, 0.2, 0.3, 0.4, 0.5,
                  0.6, 0.7, 0.8, 0.85, 0.9, 0.99, 1.0]
    # one ROC point per threshold
    for threshold in thresholds:
        # binarize the scores at this threshold
        binarized = [int(score >= threshold) for score in y_pred]
        # record the true- and false-positive rates
        tpr_list.append(tpr(y_true, binarized))
        fpr_list.append(fpr(y_true, binarized))
    plt.figure(figsize=(7, 7))
    plt.fill_between(fpr_list, tpr_list, alpha=0.4)
    plt.plot(fpr_list, tpr_list, lw=3)
    plt.xlim(0, 1.0)
    plt.ylim(0, 1.0)
    plt.xlabel('FPR', fontsize=15)
    plt.ylabel('TPR', fontsize=15)
    plt.show()
