from collections import Counter
from typing import Dict, List

import numpy as np

from instruction_re.core.datatypes import Relation


# def calculate_metrics(preds: List[Relation], truth: List[Relation], options: List[str]):
#     """
#     Compute precision, recall and f1 from the predicted and true results.
#     """
#     intersection = list(set(preds) & set(truth))

#     precision = len(intersection) / len(preds) if len(preds) != 0 else 0
#     recall = len(intersection) / len(truth) if len(truth) != 0 else 0
#     f1 = (
#         2 * precision * recall / (precision + recall)
#         if (precision + recall) != 0
#         else 0
#     )

#     return precision, recall, f1


def calculate_metrics(
    spans_pred: List[List[Relation]],
    spans_true: List[List[Relation]],
    options: List[str],
):
    """
    Build a confusion matrix over the whole batch / epoch and derive
    precision, recall and f1-score per label, together with micro,
    macro and weighted averages.

    :param spans_pred: predicted spans over batch / epoch
    :param spans_true: true spans over batch / epoch
    :param options: list of labels present in the data
    :return: dictionary of per-label metrics and average metrics
    """
    # Index 0 is reserved for the "no relation" class "O"; every real
    # label gets the next free index in option order.
    label2index = {"O": 0}
    for option in options:
        label2index[option] = len(label2index)

    confusion_matrix = build_confusion_matrix(
        preds=spans_pred, truth=spans_true, label2index=label2index
    )

    metrics = calculate_metrics_from_confusion_matrix(
        confusion_matrix=confusion_matrix, label2index=label2index
    )
    metrics = merge_metrics_and_support(
        metrics_per_label=metrics,
        support_per_label=calculate_support_classes(spans=spans_true),
    )

    return add_average_metrics(
        confusion_matrix=confusion_matrix,
        label2index=label2index,
        metrics=metrics,
    )


def calculate_support_classes(spans: List[List[Relation]]) -> Dict[str, int]:
    """
    Count how many true spans there are for every label (the "support").

    :param spans: true spans over batch / epoch (one list per data item)
    :return: mapping from label to its number of occurrences; labels that
        never occur are simply absent from the result
    """
    # Counter is a dict subclass, so callers doing lookups, `in` checks or
    # iteration over the result are unaffected by this change.
    return Counter(span.type for item_spans in spans for span in item_spans)


def build_confusion_matrix(
    preds: List[List[Relation]],
    truth: List[List[Relation]],
    label2index: Dict[str, int],
) -> np.ndarray:
    """
    Accumulate a confusion matrix over all data items.

    :param preds: predicted relation triples over batch / epoch
    :param truth: true relation triples over batch / epoch
    :param label2index: mapping between label and its index in confusion matrix
    :return: confusion matrix of shape (n_labels, n_labels)
    """
    n_labels = len(label2index)
    matrix = np.zeros((n_labels, n_labels))

    # One update per data item, pairing predictions with their gold spans.
    for item_preds, item_truth in zip(preds, truth):
        matrix = update_confusion_matrix(
            preds=item_preds,
            truth=item_truth,
            confusion_matrix=matrix,
            label2index=label2index,
        )

    return matrix


def update_confusion_matrix(
    preds: List[Relation],
    truth: List[Relation],
    confusion_matrix: np.ndarray,
    label2index: Dict[str, int],
) -> np.ndarray:
    """
    Fold the spans of a single data item into the confusion matrix.

    Rows index the true label, columns the predicted label; the reserved
    "O" row/column collects spurious and missed spans respectively.

    :param preds: predicted relation triples
    :param truth: true relation triples
    :param confusion_matrix: matrix of shape (n_labels, n_labels), mutated in place
    :param label2index: mapping between label and its index in confusion matrix
    :return: the updated confusion matrix
    """
    background = label2index["O"]

    for pred in preds:
        col = label2index[pred.type]
        if pred in truth:
            # exact match: true positive on the diagonal
            confusion_matrix[col][col] += 1
        else:
            # spurious prediction: counted in the "O" row
            confusion_matrix[background][col] += 1

    for gold in truth:
        if gold not in preds:
            # missed gold span: counted in the "O" column
            confusion_matrix[label2index[gold.type]][background] += 1

    return confusion_matrix


def calculate_metrics_from_confusion_matrix(
    confusion_matrix: np.ndarray, label2index: Dict[str, int]
) -> Dict[str, Dict[str, float]]:
    """
    Compute precision, recall and f1-score for every label (no averaging).

    Rows of the matrix index the true label, columns the predicted label;
    the reserved "O" label is skipped.

    :param confusion_matrix: matrix of shape (n_labels, n_labels)
    :param label2index: mapping between label and its index in confusion matrix
    :return: mapping from label to its precision / recall / f1-score
    """
    metrics: Dict[str, Dict[str, float]] = {}

    for label, idx in label2index.items():
        if label == "O":
            continue

        tp = confusion_matrix[idx][idx]
        if tp > 0:
            # column sum = everything predicted as this label,
            # row sum = everything that truly is this label
            precision = tp / np.sum(confusion_matrix[:, idx])
            recall = tp / np.sum(confusion_matrix[idx, :])
        else:
            precision = 0
            recall = 0

        if precision > 0 and recall > 0:
            f1 = 2 * precision * recall / (precision + recall)
        else:
            f1 = 0

        metrics[label] = {
            "precision": precision,
            "recall": recall,
            "f1-score": f1,
        }

    return metrics


def merge_metrics_and_support(
    metrics_per_label: Dict[str, Dict[str, float]], support_per_label: Dict[str, float]
) -> Dict[str, Dict[str, float]]:
    """
    Attach the support count to every label's metrics (mutated in place).

    :param metrics_per_label: mapping from label to its metrics
    :param support_per_label: mapping from label to its support
    :return: metrics_per_label with a "support" entry added per label
    """
    for label, label_metrics in metrics_per_label.items():
        # Labels absent from the gold data get a support of 0.
        label_metrics["support"] = support_per_label.get(label, 0)

    return metrics_per_label


def add_average_metrics(
    confusion_matrix: np.ndarray,
    label2index: Dict[str, int],
    metrics: Dict[str, Dict[str, float]],
) -> Dict[str, Dict[str, float]]:
    """
    Add micro-average, macro-average and weighted-average entries to the
    per-label metrics dictionary (mutated in place and returned).

    :param confusion_matrix: (n_labels, n_labels) matrix; rows index the true
        label, columns the predicted label
    :param label2index: mapping between label and its index in confusion matrix
    :param metrics: per-label metrics, each with precision/recall/f1-score/support
    :return: metrics extended with "macro_avg", "micro_avg", "weighted_avg"
    """
    precisions, recalls, f1scores, supports = [], [], [], []
    for metrics_label in metrics.values():
        precisions.append(metrics_label["precision"])
        recalls.append(metrics_label["recall"])
        f1scores.append(metrics_label["f1-score"])
        supports.append(metrics_label["support"])

    metrics["macro_avg"] = {
        "precision": np.mean(precisions),
        "recall": np.mean(recalls),
        "f1-score": np.mean(f1scores),
    }

    # Micro average: pool TP/FP/FN over all real labels. The "O" row/column
    # only holds spurious and missed spans, so its diagonal cell is excluded
    # from the TP total.
    idxs = [value for key, value in label2index.items() if key != "O"]
    true_positive_total = np.sum([confusion_matrix[idx][idx] for idx in idxs])
    # BUG FIX: the original summed confusion_matrix[:][idx] — `[:]` is the
    # identity slice, so that is the ROW idx, making false positives equal to
    # false negatives. False positives live in the COLUMN of each label.
    false_positive_total = np.sum(
        [np.sum(confusion_matrix[:, idx]) - confusion_matrix[idx][idx] for idx in idxs]
    )
    false_negative_total = np.sum(
        [np.sum(confusion_matrix[idx, :]) - confusion_matrix[idx][idx] for idx in idxs]
    )

    # Guard the all-zero case (no predictions / no gold spans) to avoid 0/0.
    pred_total = true_positive_total + false_positive_total
    gold_total = true_positive_total + false_negative_total
    precision_micro = true_positive_total / pred_total if pred_total > 0 else 0
    recall_micro = true_positive_total / gold_total if gold_total > 0 else 0

    metrics["micro_avg"] = {
        "precision": precision_micro,
        "recall": recall_micro,
        "f1-score": (
            2 * precision_micro * recall_micro / (precision_micro + recall_micro)
            if precision_micro > 0 and recall_micro > 0
            else 0
        ),
    }

    # Weighted average: each label weighted by its share of the true spans.
    # np.average normalizes the weights itself; fall back to the unweighted
    # mean when there is no support at all (avoids division by zero).
    weights = supports if np.sum(supports) > 0 else None
    metrics["weighted_avg"] = {
        "precision": np.average(precisions, weights=weights),
        "recall": np.average(recalls, weights=weights),
        "f1-score": np.average(f1scores, weights=weights),
    }

    return metrics
