import torch
from torch import Tensor
from typing import Any, Callable, Optional
from torchmetrics.classification.stat_scores import StatScores
from torchmetrics.functional.classification.stat_scores import _reduce_stat_scores, _stat_scores_update
from torchmetrics.utilities.enums import AverageMethod, MDMCAverageMethod
import warnings
import numpy as np
warnings.filterwarnings("ignore")  # suppress all warnings globally (e.g. numpy divide warnings during normalization)
from torchmetrics import MetricTracker, F1Score, Accuracy, Recall, Precision, Specificity, ConfusionMatrix
from params import params_dict
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, cohen_kappa_score



def _my_sensitivity_compute(
    tp: Tensor,
    fp: Tensor,
    tn: Tensor,
    fn: Tensor,
    average: str,
    mdmc_average: Optional[str],
) -> Tensor:
    """Compute sensitivity (true positive rate, TP / (TP + FN)) from stat scores.

    Args:
        tp: True positives
        fp: False positives
        tn: True negatives (not part of the formula; kept for a uniform signature)
        fn: False negatives
        average: Defines the reduction that is applied
        mdmc_average: Defines how averaging is done for multi-dimensional multi-class
            inputs (on top of the ``average`` parameter)

    Returns:
        The reduced sensitivity tensor (scalar unless ``average`` is ``"none"``/``None``).
    """
    # Clone: the original code wrote the -1 sentinels below straight into ``tp``,
    # which is the metric's accumulated state tensor, corrupting later computes.
    numerator = tp.clone()
    denominator = tp + fn
    if average == AverageMethod.NONE and mdmc_average != MDMCAverageMethod.SAMPLEWISE:
        # A class is not present if there exist no TPs, no FPs, and no FNs;
        # mark such classes with -1 so _reduce_stat_scores skips them.
        meaningless_indices = torch.nonzero((tp | fn | fp) == 0).cpu()
        numerator[meaningless_indices, ...] = -1
        denominator[meaningless_indices, ...] = -1
    return _reduce_stat_scores(
        numerator=numerator,
        denominator=denominator,
        weights=None if average != AverageMethod.WEIGHTED else denominator,
        average=average,
        mdmc_average=mdmc_average,
    )


class mySensitivity(StatScores):
    r"""Sensitivity metric, adapted from the torchmetrics `Specificity`_ implementation.

    The two are mirror images of each other:

    Sensitivity = TP / (TP + FN)
    Specificity = TN / (TN + FP)
    """
    is_differentiable = False
    higher_is_better = True

    def __init__(
        self,
        num_classes: Optional[int] = None,
        threshold: float = 0.5,
        average: str = "micro",
        mdmc_average: Optional[str] = None,
        ignore_index: Optional[int] = None,
        top_k: Optional[int] = None,
        multiclass: Optional[bool] = None,
        compute_on_step: bool = True,
        dist_sync_on_step: bool = False,
        process_group: Optional[Any] = None,
        dist_sync_fn: Callable = None,
    ) -> None:
        allowed_average = ["micro", "macro", "weighted", "samples", "none", None]
        if average not in allowed_average:
            raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.")

        # "weighted"/"none"/None all need per-class stats, which StatScores
        # accumulates under the "macro" reduce mode.
        if average in ("weighted", "none", None):
            reduce_mode = "macro"
        else:
            reduce_mode = average

        super().__init__(
            reduce=reduce_mode,
            mdmc_reduce=mdmc_average,
            threshold=threshold,
            top_k=top_k,
            num_classes=num_classes,
            multiclass=multiclass,
            ignore_index=ignore_index,
            compute_on_step=compute_on_step,
            dist_sync_on_step=dist_sync_on_step,
            process_group=process_group,
            dist_sync_fn=dist_sync_fn,
        )

        self.average = average

    def compute(self) -> Tensor:
        """Compute the sensitivity score from the stats accumulated by ``update``.

        Return:
            The shape of the returned tensor depends on the ``average`` parameter:

            - ``average in ['micro', 'macro', 'weighted', 'samples']`` -> one-element tensor
            - ``average in ['none', None]`` -> shape ``(C,)`` where ``C`` is the
              number of classes
        """
        tp, fp, tn, fn = self._get_final_stats()
        return _my_sensitivity_compute(tp, fp, tn, fn, self.average, self.mdmc_reduce)

def sensitivityCalc(Predictions, Labels):
    """Compute macro- and micro-averaged sensitivity (recall) with sklearn.

    Args:
        Predictions: predicted class labels (1-D array-like)
        Labels: ground-truth class labels (1-D array-like)

    Returns:
        Tuple ``(macro_sensitivity, micro_sensitivity)`` as floats.
    """
    # Local import: the module-level sklearn import only pulls in
    # confusion_matrix/cohen_kappa_score, so the original code raised a
    # NameError on multilabel_confusion_matrix at call time.
    from sklearn.metrics import multilabel_confusion_matrix

    # MCM has shape (n_classes, 2, 2): one binary confusion matrix per class
    # (samplewise expects a bool; was None in the original).
    MCM = multilabel_confusion_matrix(Labels, Predictions,
                                      sample_weight=None,
                                      labels=None, samplewise=False)

    # Slice out each class's tn / fp / tp / fn.
    tn_sum = MCM[:, 0, 0]  # True Negative
    fp_sum = MCM[:, 0, 1]  # False Positive

    tp_sum = MCM[:, 1, 1]  # True Positive
    fn_sum = MCM[:, 1, 0]  # False Negative

    # Add 1e-6 to avoid 0/0 -> nan when a class has no positive samples
    # (i.e. tp_sum and fn_sum are both zero). Renamed from the misleading
    # "Condition_negative": tp + fn is the count of condition-POSITIVE samples.
    condition_positive = tp_sum + fn_sum + 1e-6

    sensitivity = tp_sum / condition_positive
    macro_sensitivity = np.average(sensitivity, weights=None)

    # Same epsilon guard for the micro average, for consistency with macro.
    micro_sensitivity = np.sum(tp_sum) / (np.sum(tp_sum + fn_sum) + 1e-6)

    return macro_sensitivity, micro_sensitivity


def draw_confusion_matrix(label_true, label_pred, label_name, normlize, title="Confusion Matrix", pdf_save_path=None, dpi=100):
    """Draw a confusion matrix annotated with per-cell percentage and raw count.

    @param label_true: ground-truth labels, e.g. [0, 1, 2, 7, 4, 5, ...]
    @param label_pred: predicted labels, e.g. [0, 5, 4, 2, 1, 4, ...]
    @param label_name: class names, e.g. ['cat', 'dog', 'flower', ...]
    @param normlize: if True, show each cell as a fraction of its true-label row
    @param title: figure title
    @param pdf_save_path: if not None, path to save the figure — .png | .pdf | ...
        or any other format plt.savefig supports
    @param dpi: resolution used when saving; papers usually require >= 300 dpi
    @return: None (draws on the current matplotlib figure)

    example:
            draw_confusion_matrix(label_true=y_gt,
                          label_pred=y_pred,
                          label_name=["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"],
                          normlize=True,
                          title="Confusion Matrix on Fer2013",
                          pdf_save_path="Confusion_Matrix_on_Fer2013.png",
                          dpi=300)
    """
    # Compute the matrix ONCE (the original called confusion_matrix twice);
    # keep the raw counts for the cell annotations.
    counts = confusion_matrix(label_true, label_pred)
    cm = counts
    if normlize:
        # Normalize by true-label row sums (per-class support).
        # NOTE(review): a class absent from label_true yields a 0 row sum and
        # nan cells here, silenced by the module-level warning filter — confirm.
        row_sums = np.sum(cm, axis=1)
        cm = cm / row_sums[:, np.newaxis]
    # Transpose so predictions run along x and ground truth along y.
    cm = cm.T
    counts = counts.T
    plt.imshow(cm, cmap='Blues')
    plt.title(title)
    plt.xlabel("Predict label")
    plt.ylabel("Truth label")
    num_classes = len(label_name)
    plt.yticks(range(num_classes), label_name)
    plt.xticks(range(num_classes), label_name, rotation=45)

    plt.tight_layout()

    plt.colorbar()

    for i in range(num_classes):
        for j in range(num_classes):
            color = (1, 1, 1) if i == j else (0, 0, 0)  # white text on the diagonal, black elsewhere
            # Percentage (one decimal) over the raw count. When normlize is
            # False this shows count*100 with a % sign — legacy behavior kept.
            cell_text = f"{cm[i, j] * 100:.1f}%\n{counts[i, j]}"
            plt.text(i, j, cell_text, verticalalignment='center', horizontalalignment='center', color=color)

    # plt.show()
    if pdf_save_path is not None:
        plt.savefig(pdf_save_path, bbox_inches='tight', dpi=dpi)


def matrix(y_pred, y_true, labels_name):
    """Render the normalized confusion matrix and save it as ``Confusion_Matrix.jpg``."""
    plot_kwargs = {
        "label_true": y_true,
        "label_pred": y_pred,
        "label_name": labels_name,
        "normlize": True,
        "title": "Confusion Matrix",
        "pdf_save_path": "Confusion_Matrix.jpg",
        "dpi": 300,
    }
    draw_confusion_matrix(**plot_kwargs)


# Module-level metric instances shared by the evaluation code.
# All thresholds are set to 1/num_classes, so the highest-probability class
# always clears the threshold.
# NOTE(review): passing `threshold`/`average` alongside task='multiclass'
# relies on the torchmetrics task-wrapper API — confirm against the
# installed torchmetrics version.
test_f1_en = F1Score(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], average="macro", task='multiclass')  # macro F1 score
test_acc_en = Accuracy(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], average="micro", task='multiclass')  # micro (overall) Accuracy
test_rcl_en = Recall(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], average="macro", task='multiclass')  # macro Recall
test_pcl_en = Precision(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], average="macro", task='multiclass')  # macro Precision
# test_sen_en = mySensitivity(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], average="macro", task='multiclass')  # my Sensitivity
test_spc_en = Specificity(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], average="macro", task='multiclass') # macro Specificity
test_conf_mat_en = ConfusionMatrix(num_classes=params_dict['output_dim'], threshold=1. / params_dict['output_dim'], task='multiclass')  # Confusion Matrix
