import copy
import warnings
import numpy as np
import torch

from graph4nlp.pytorch.modules.evaluation.base import EvaluationMetricBase


class Accuracy(EvaluationMetricBase):
    """
        Calculate precision, recall, F1 and accuracy for classification results.

    Parameters
    ----------
    metrics: list
        Indicate the metrics for the class to return.
        Note that each metric must be one of ``precision``, ``recall``, ``F1``, ``accuracy``.
        And the results' order is the same as the metrics.
    """

    def __init__(self, metrics):
        super().__init__()
        # Fail fast on a non-list argument: a bare string would otherwise be
        # stored silently and iterated character-by-character later in
        # ``calculate_scores``.
        if not isinstance(metrics, list):
            raise TypeError(
                "argument metric must be list of str containing "
                "'precision', 'recall', 'F1', 'accuracy'"
            )
        for metric in metrics:
            if metric not in ["precision", "recall", "F1", "accuracy"]:
                raise TypeError(
                    "argument metric must be list of str containing "
                    "'precision', 'recall', 'F1', 'accuracy'"
                )
        self.metrics = metrics

    def calculate_scores(
        self, ground_truth, predict, average=None, zero_division="warning", sample_weight=None
    ):
        """
            The function to calculate the expected metrics for each label.

        Parameters
        ----------
        ground_truth: torch.Tensor
            Ground truth (correct) target values, 1d int tensor
        predict: torch.Tensor
            The predicted target values generated by the classifier, 1d int tensor
        average: string or None, [None (default), 'micro', 'macro', 'weighted']
            If set ``None``, it will return the scores for each class.
            Otherwise, it will be reduced by the strategy as follows:

            ``'micro'``:
                Calculate metrics globally by counting the total true positives,
                false negatives and false positives.

            ``'macro'``:
                Calculate metrics for each label, and calculate the unweighted
                average values. This does not take label imbalance into account.

            ``'weighted'``:
                Calculate metrics for each label, and calculate the weighted
                average value. Note that the weight is the number of the true
                instances for each label.

        zero_division: "warning", 0, 1, default="warning"
            Sets the value to return when there is a zero division.

            If set to "warning", this acts as 0, but warnings are also raised.

        sample_weight: None
            The sample weight. It is not implemented yet and is ignored.

        Returns
        -------
        scores: list[object]
            The requested metrics, in the same order as the ``metrics`` list
            passed to ``__init__``.
        """
        ground_truth_np, predict_np = self._check_available(ground_truth, predict, zero_division)

        # Accuracy is simply the fraction of exact matches.
        accuracy_score = np.average(ground_truth_np == predict_np)

        # Fast path: skip building the confusion matrix when accuracy is the
        # only requested metric.
        # Bug fix: the original condition used ``is`` against a fresh list
        # literal (always False) and named the wrong metric ("precision").
        if self.metrics == ["accuracy"]:
            return [accuracy_score]

        mcm = self._calculate_confusion_matrix(ground_truth=ground_truth_np, predict=predict_np)

        # Per-label counts: true positives, predicted positives, actual positives.
        tp_sum = mcm[:, 1, 1]
        pred_sum = tp_sum + mcm[:, 0, 1]  # tp + fp
        gt_sum = tp_sum + mcm[:, 1, 0]  # tp + fn

        if average == "micro":
            # Pool the counts over all labels before dividing.
            tp_sum = np.array([tp_sum.sum()])
            pred_sum = np.array([pred_sum.sum()])
            gt_sum = np.array([gt_sum.sum()])

        # calculate precision and recall
        precision = self._prf_divide(tp_sum, pred_sum, zero_division=zero_division)
        recall = self._prf_divide(tp_sum, gt_sum, zero_division=zero_division)

        # F-beta with beta = 1, i.e. plain F1.
        beta2 = 1.0
        denominator = beta2 * precision + recall
        denominator[denominator == 0.0] = 1  # avoid division by 0; F1 is 0 there anyway
        f_score = (1 + beta2) * precision * recall / denominator

        # 'weighted' averages by support (number of true instances per label);
        # 'macro' — or any other non-None value — averages uniformly.
        weights = gt_sum if average == "weighted" else None
        if average is not None:
            precision = np.average(precision, weights=weights)
            recall = np.average(recall, weights=weights)
            f_score = np.average(f_score, weights=weights)

        # ``__init__`` guarantees every name below is a valid key.
        results = {
            "precision": precision,
            "recall": recall,
            "F1": f_score,
            "accuracy": accuracy_score,
        }
        return [results[metric_name] for metric_name in self.metrics]

    @staticmethod
    def _prf_divide(numerator, denominator, zero_division):
        """
            The function performs division and handles zero-division situations.

        Parameters
        ----------
        numerator: numpy.ndarray
        denominator: numpy.ndarray
        zero_division: "warning", 0, 1, default="warning"
            Sets the value to return when there is a zero division.

            If set to "warning", this acts as 0, but warnings are also raised.

        Returns
        -------
        results: numpy.ndarray
            The division results.
        """
        zero_mask = denominator == 0.0
        # Substitute 1 into the zero slots so the division itself never warns;
        # those entries are overwritten below.
        safe_denominator = denominator.copy()
        safe_denominator[zero_mask] = 1.0
        ret = numerator / safe_denominator
        if not np.any(zero_mask):
            return ret
        ret[zero_mask] = 0.0 if zero_division in ["warning", 0] else 1.0
        if zero_division == "warning":
            warnings.warn("zero division encountered")
        return ret

    @staticmethod
    def _check_available(ground_truth, predict, zero_division):
        """
            The function to check the parameters.
            If all tests are passed, it will convert the tensors to numpy.

        Parameters
        ----------
        ground_truth: Any
        predict: Any
        zero_division: Any

        Returns
        -------
        ground_truth: numpy.ndarray
            numpy version of tensor ground_truth
        predict: numpy.ndarray
            numpy version of tensor predict

        Raises
        -------
        TypeError: TypeError
        ValueError: ValueError
        """
        if not isinstance(ground_truth, torch.Tensor):
            raise TypeError("argument ground_truth must be torch.tensor")
        if not isinstance(predict, torch.Tensor):
            raise TypeError("argument predict must be torch.tensor")
        # ``torch.int`` is an alias of ``torch.int32``, so this tuple covers
        # the same dtypes as the original list.
        int_dtypes = (torch.int8, torch.int16, torch.int32, torch.int64)
        if ground_truth.dtype not in int_dtypes:
            raise TypeError("argument ground_truth must be int tensor")
        if predict.dtype not in int_dtypes:
            raise TypeError("argument predict must be int tensor")
        if ground_truth.dim() != 1:
            raise TypeError("argument ground_truth must be 1d tensor")
        if predict.dim() != 1:
            raise TypeError("argument predict must be 1d tensor")
        if ground_truth.shape[0] != predict.shape[0]:
            raise ValueError("argument ground_truth and predict must be the same shape")

        zero_division_ok = False
        if isinstance(zero_division, str) and zero_division == "warning":
            zero_division_ok = True
        elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
            zero_division_ok = True
        if not zero_division_ok:
            raise ValueError("argument zero_division must be in ['warning', 0, 1]")

        # ``.cpu()`` makes the conversion safe for tensors living on a GPU;
        # it is a no-op for CPU tensors.
        return ground_truth.cpu().numpy(), predict.cpu().numpy()

    def _calculate_confusion_matrix(self, ground_truth, predict):
        """
            The function to calculate the confusion matrix for multi-class inputs.
            The labels will be collected and relabeled. (eg: [1, 2, 3] --> [0, 1, 2])

            In multi-class confusion matrix :math:`MCM`, the count of true negatives is
            :math:`MCM_{:,0,0}`, false positives is :math:`MCM_{:,0,1}`, false negatives
            is :math:`MCM_{:,1,0}` and true positives is :math:`MCM_{:,1,1}`.

        Parameters
        ----------
        ground_truth: numpy.ndarray
        predict: numpy.ndarray

        Returns
        -------
        confusion_matrix: numpy.ndarray
            The confusion matrix which has the shape: [num_labels, 2, 2]
        """
        # select all labels, remove duplicates and sort
        unique_labels = sorted(self._get_unique_labels(ground_truth, predict))

        # relabel into the contiguous range [0, n_labels)
        ground_truth_transformed = np.searchsorted(unique_labels, ground_truth)
        predict_transformed = np.searchsorted(unique_labels, predict)

        n_labels = len(unique_labels)

        # per-label counts obtained via bincount
        correct = ground_truth_transformed == predict_transformed
        tp = np.bincount(ground_truth_transformed[correct], minlength=n_labels)
        pred_sum = np.bincount(predict_transformed, minlength=n_labels)
        gt_sum = np.bincount(ground_truth_transformed, minlength=n_labels)
        fp = pred_sum - tp
        fn = gt_sum - tp
        tn = ground_truth_transformed.shape[0] - tp - fp - fn
        # stack into [n_labels, 2, 2] with layout [[tn, fp], [fn, tp]] per label
        return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)

    @staticmethod
    def _get_unique_labels(*lists):
        """
            Find the unique elements in the given lists.

        Parameters
        ----------
        lists: [numpy.ndarray]
            List of arrays which contain labels.

        Returns
        -------
        unique_labels: numpy.ndarray
            It has the unique labels encountered in ``lists``.
            Note: the order is arbitrary (set-based); callers sort if needed.
        """
        labels = set()
        for li in lists:
            labels.update(np.unique(li).tolist())
        return np.array(list(labels))
