from __future__ import annotations
import torch
from typing import List
import torch.distributed as dist
import math
import analytics.crop_recognition.common_settings.model_config as config


class Metric(object):
    """Abstract interface shared by all metric accumulators.

    Adapted from:
    https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py
    """

    def __init__(self) -> None:
        pass

    def reset(self):
        """Clear all accumulated statistics."""
        raise NotImplementedError()

    def add_samples(self, prediction, label):
        """Fold one batch of (prediction, label) pairs into the metric."""
        raise NotImplementedError()

    def value(self):
        """Return the raw tensor(s) backing the metric."""
        raise NotImplementedError()

    @classmethod
    def from_tensor(cls, tensor, device):
        """Build a metric instance directly from raw tensors on ``device``."""
        raise NotImplementedError()

    @classmethod
    def combine_metric(cls, metric_list: List[Metric]):
        """Merge several metric instances into a single one."""
        raise NotImplementedError()


class Evaluator:
    """Abstract interface for evaluators that aggregate metrics over batches."""

    def __init__(self) -> None:
        pass

    def reset(self):
        # Fixed: the original signature omitted ``self``, so calling
        # ``evaluator.reset()`` raised TypeError instead of NotImplementedError.
        """Clear all accumulated state."""
        raise NotImplementedError()

    def get_metric_dict(self):
        """Return a dict of computed metrics, including a "criteria" entry."""
        raise NotImplementedError()

    @classmethod
    def all_gather_ddp(cls) -> Evaluator:
        # NOTE(review): subclasses implement this as an instance method;
        # kept as a classmethod here to preserve the existing interface.
        """Gather state from all DDP workers and return a merged evaluator."""
        raise NotImplementedError()

    def add(self, predictions, target):
        """Fold one batch of (predictions, target) pairs into the evaluator."""
        raise NotImplementedError()


class L2NormStatMatrix(Metric):
    """
    Running mean and biased variance of the squared L2 error for a time
    series dataset, tracked independently per time step and channel.

    Combined mean / variance update formulas:
    https://www.emathzone.com/tutorials/basic-statistics/combined-variance.html#:~:text=If%20X%20%C2%AF%20c%20is%20the%20combined%20mean,c%29%202%5D%20n%201%20%2B%20n%202%20Here

    2 matrix maintained

    1: Matrix of mean & variance of shape (2, T, C); index 0 holds the mean,
       index 1 the biased (correction=0) variance.
    2: Count tensor of shape (1,): number of samples folded in so far.
    """

    def __init__(self, time_length, chn_num, device="cpu") -> None:
        super().__init__()
        self.device = device
        self.l2_stat_matrix = torch.zeros(
            (2, time_length, chn_num), dtype=torch.float32
        ).to(device)
        self.cnt_matrix = torch.zeros((1,), dtype=torch.int64).to(device)

    @classmethod
    def from_tensor(
        cls, matrix_tensor: torch.FloatTensor, cnt_tensor: torch.LongTensor, device
    ):
        """Build an instance from a (2, T, C) stat tensor and a (1,) count."""
        _, T, C = matrix_tensor.shape
        new_matrix = L2NormStatMatrix(T, C, device)
        new_matrix.l2_stat_matrix = matrix_tensor.to(device)
        new_matrix.cnt_matrix = cnt_tensor.to(device)

        return new_matrix

    def reset(self):
        """Zero out both the stat matrix and the sample count."""
        self.l2_stat_matrix.fill_(0)
        self.cnt_matrix.fill_(0)

    def add_samples(self, prediction, label):
        """
        prediction & label should be N * T * C tensors or numpy arrays, float32
        type

        """
        if not torch.is_tensor(prediction):
            prediction = torch.FloatTensor(prediction)

        if not torch.is_tensor(label):
            label = torch.FloatTensor(label)

        assert len(prediction.shape) == len(label.shape) == 3
        assert prediction.shape[0] == label.shape[0]
        assert prediction.shape[1:] == label.shape[1:] == self.l2_stat_matrix.shape[1:]

        cur_N = prediction.shape[0]

        prediction = prediction.to(self.device)
        label = label.to(self.device)
        new_stats_matrix = torch.zeros_like(self.l2_stat_matrix).to(self.device)
        # Squared error, computed in place to avoid an extra allocation.
        l2_norm = prediction - label
        torch.mul(l2_norm, l2_norm, out=l2_norm)

        # Biased (correction=0) variance so the combine formula stays exact.
        new_stats_matrix[1], new_stats_matrix[0] = torch.var_mean(
            l2_norm, dim=0, correction=0
        )
        new_cnt_tensor = torch.LongTensor([cur_N])
        new_metric = self.from_tensor(
            new_stats_matrix, new_cnt_tensor, device=self.device
        )
        self.add_other(new_metric)

    @classmethod
    def combine_metric(
        cls, metric_list: List[L2NormStatMatrix], device
    ) -> L2NormStatMatrix:
        """Merge a list of stat matrices into a fresh instance on ``device``.

        Fixed: the original folded into ``metric_list[0]`` itself, mutating the
        caller's metric and leaving the result on that metric's device rather
        than on ``device``. Folding into a zero-count accumulator is exact:
        with n1 == 0 the combined mean/variance reduce to the other operand's.
        """
        if not metric_list:
            return L2NormStatMatrix(0, 0, device)

        _, T, C = metric_list[0].l2_stat_matrix.shape
        result_metric = L2NormStatMatrix(T, C, device)
        for cur_metric in metric_list:
            result_metric.add_other(cur_metric)

        return result_metric

    def add_other(self, other: L2NormStatMatrix):
        """Fold ``other`` into ``self`` via the combined mean/variance formulas."""
        # Move the other metric's tensors over so cross-device folds work.
        other_stats = other.l2_stat_matrix.to(self.device)
        n1 = self.cnt_matrix
        n2 = other.cnt_matrix.to(self.device)
        new_cnt = n1 + n2

        # Combined mean: count-weighted average of the two means.
        xc = (n1 * self.l2_stat_matrix[0] + n2 * other_stats[0]) / new_cnt
        mean_diff_1 = self.l2_stat_matrix[0] - xc
        mean_diff_2 = other_stats[0] - xc
        # Combined variance: each part contributes its variance plus the
        # squared shift of its mean from the combined mean.
        sc_part1 = n1 * (self.l2_stat_matrix[1] + mean_diff_1 * mean_diff_1)
        sc_part2 = n2 * (other_stats[1] + mean_diff_2 * mean_diff_2)

        self.l2_stat_matrix[0] = xc
        self.l2_stat_matrix[1] = (sc_part1 + sc_part2) / new_cnt
        self.cnt_matrix = new_cnt

    def value(self):
        """Return (stat matrix of shape (2, T, C), count tensor of shape (1,))."""
        return self.l2_stat_matrix, self.cnt_matrix


class ConfusionMatrix(Metric):
    """Constructs a confusion matrix for a multi-class classification problems.

    Does not support multi-label, multi-class problems.

    Rows index the ground-truth class, columns the predicted class.

    Keyword arguments:
    - num_classes (int): number of classes in the classification problem.
    - device (str or int or torch.device): device the matrix lives on.

    Modified from: https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
    """

    def __init__(self, num_classes, device="cpu"):
        super().__init__()
        self.conf = torch.zeros((num_classes, num_classes), dtype=torch.int32).to(
            device
        )
        self.num_classes = num_classes
        self.device = device
        self.reset()

    @classmethod
    def from_tensor(cls, conf_tensor: torch.LongTensor, device) -> ConfusionMatrix:
        """Build an instance from an existing square (K, K) count tensor."""
        assert len(conf_tensor.shape) == 2
        assert conf_tensor.shape[0] == conf_tensor.shape[1]
        num_classes = conf_tensor.shape[0]
        tmp_instance = ConfusionMatrix(num_classes, device)
        tmp_instance.conf = conf_tensor.to(device)
        return tmp_instance

    def combine_other(self, other: ConfusionMatrix):
        """Accumulate the counts of ``other`` into this matrix."""
        assert self.num_classes == other.num_classes
        other = other.conf.to(self.device)
        self.conf = self.conf + other

    def reset(self):
        """Zero out all accumulated counts."""
        self.conf.fill_(0)

    def add_samples(
        self, predicted: torch.Tensor, target: torch.LongTensor
    ) -> ConfusionMatrix:
        """Computes the confusion matrix

        The shape of the confusion matrix is K x K, where K is the number
        of classes.

        Keyword arguments:
        - predicted (Tensor): Can be an N x L x ... x M x K tensor/array of
        predicted scores obtained from the model for N examples and K classes,
        or an N-tensor/array of integer values between 0 and K-1.
        - target (Tensor): Can be an N x L x ... x M x K tensor/array of
        ground-truth classes for N examples and K classes, or an N-tensor/array
        of integer values between 0 and K-1.
        """

        assert (
            predicted.shape[0] == target.shape[0]
        ), "number of targets and predicted outputs do not match"

        if len(predicted.shape) != 1:
            assert (
                predicted.shape[-1] == self.num_classes
            ), "number of predictions does not match size of confusion matrix"
            predicted = predicted.argmax(dim=-1)
        assert (predicted.max() < self.num_classes) and (
            predicted.min() >= 0
        ), "predicted values are not between 0 and k-1"

        if len(target.shape) != 1:
            assert (
                target.shape[-1] == self.num_classes
            ), "Onehot target does not match size of confusion matrix"
            # Fixed: the original passed the ``torch`` module into torch.isin;
            # the membership check must run on ``target`` itself.
            assert torch.all(
                torch.isin(target, torch.tensor([0, 1], device=target.device))
            ), "in one-hot encoding, target values should be 0 or 1"
            assert (
                target.sum(dim=-1) == 1
            ).all(), "multi-label setting is not supported"
            target = target.argmax(dim=-1)

        assert (target.max() < self.num_classes) and (
            target.min() >= 0
        ), "target values are not between 0 and k-1"
        assert (
            target.shape == predicted.shape
        ), "prediction and target should have same shape."
        # hack for bincounting 2 arrays together
        x = predicted + self.num_classes * target
        bincount_2d = torch.bincount(x, minlength=self.num_classes ** 2)

        conf = bincount_2d.view((self.num_classes, self.num_classes))
        # bincount returns int64; cast explicitly so the in-place add does not
        # fail PyTorch's int64 -> int32 in-place type-promotion check.
        self.conf += conf.to(self.conf.dtype)

    @classmethod
    def combine_metric(cls, metric_list: List[ConfusionMatrix], device):
        """Sum a list of confusion matrices into a fresh instance on ``device``."""
        if not metric_list:
            return ConfusionMatrix(1, device=device)
        dummy_metric = ConfusionMatrix(metric_list[0].num_classes, device)
        # Fixed: fold in every matrix; the original skipped metric_list[0]
        # even though ``dummy_metric`` starts at zero, dropping its counts.
        for cur_metric in metric_list:
            dummy_metric.combine_other(cur_metric)

        return dummy_metric

    def value(self):
        """
        Returns:
            Confustion matrix of K rows and K columns, where rows corresponds
            to ground-truth targets and columns corresponds to predicted
            targets.
        """

        return self.conf


class TimeSequenceConfusionMatrix(Metric):
    """Constructs a confusion matrix for a time sequence multi-class classification problems.

    Does not support multi-label, multi-class problems.

    The confusion matrix will be size of (time_len, num_class, num_class),
    representing the time index dimension, the ground truth dimension, and
    prediction dimension respectively.

    Keyword arguments:
    - time_len (int): How many time stamps will be take into consideration.
    - num_classes (int): number of classes in the classification problem.
    - device (str or int or torch.device), which device to use in torch.

    """

    def __init__(self, time_len, num_classes, device="cpu"):
        super().__init__()
        self.conf = torch.zeros(
            (time_len, num_classes, num_classes), dtype=torch.int32
        ).to(device)
        self.time_len = time_len
        self.num_classes = num_classes
        self.device = device
        self.reset()

    @classmethod
    def from_tensor(
        cls, conf_tensor: torch.LongTensor, device
    ) -> TimeSequenceConfusionMatrix:
        """Build an instance from an existing (T, K, K) count tensor."""
        assert len(conf_tensor.shape) == 3
        assert conf_tensor.shape[1] == conf_tensor.shape[2]
        num_classes = conf_tensor.shape[1]
        time_len = conf_tensor.shape[0]
        tmp_instance = TimeSequenceConfusionMatrix(time_len, num_classes, device)
        tmp_instance.conf = conf_tensor.to(device)
        return tmp_instance

    def combine_other(self, other: TimeSequenceConfusionMatrix):
        """Accumulate the counts of ``other`` into this matrix."""
        assert self.num_classes == other.num_classes
        assert (
            self.time_len == other.time_len
        ), "Time len should equal: self: {}, other: {}".format(
            self.time_len, other.time_len
        )
        other = other.conf.to(self.device)
        self.conf = self.conf + other

    def reset(self):
        """Zero out all accumulated counts."""
        self.conf.fill_(0)

    def add_samples(
        self,
        predicted: torch.Tensor,
        target: torch.LongTensor,
        time_stamp: torch.LongTensor,
    ) -> TimeSequenceConfusionMatrix:
        """Computes the confusion matrix

        The shape of the confusion matrix is K x K, where K is the number
        of classes.

        Keyword arguments:
        - predicted (Tensor): (N, K) tensor.
        - target (Tensor): (N, K) or (N, ) tensor.
        - time_stamp (Tensor): int.
        """

        assert (
            predicted.shape[0] == target.shape[0]
        ), "number of targets and predicted outputs do not match, predicted: {}, target: {}".format(
            predicted.shape[0], target.shape[0]
        )

        if len(predicted.shape) != 1:
            assert (
                predicted.shape[-1] == self.num_classes
            ), "number of predictions does not match size of confusion matrix"
            predicted = predicted.argmax(dim=-1)
        assert (predicted.max() < self.num_classes) and (
            predicted.min() >= 0
        ), "predicted values are not between 0 and k-1, predicted_max: {}, num_class: {}".format(
            predicted.max(), self.num_classes
        )

        if len(target.shape) != 1:
            assert (
                target.shape[-1] == self.num_classes
            ), "Onehot target does not match size of confusion matrix"
            # Fixed: the original passed the ``torch`` module into torch.isin;
            # the membership check must run on ``target`` itself.
            assert torch.all(
                torch.isin(target, torch.tensor([0, 1], device=target.device))
            ), "in one-hot encoding, target values should be 0 or 1"
            assert (
                target.sum(dim=-1) == 1
            ).all(), "multi-label setting is not supported"
            target = target.argmax(dim=-1)

        assert (target.max() < self.num_classes) and (
            target.min() >= 0
        ), "target values are not between 0 and k-1"
        assert (
            target.shape == predicted.shape
        ), "prediction and target should have same shape."
        assert (
            time_stamp.max() < self.time_len
        ), "Time stamp index should be smaller than time_len"
        # hack for bincounting 2 arrays together
        # predicted: (N, )
        # target: (N, )
        x = (
            predicted
            + self.num_classes * target
            + self.num_classes * self.num_classes * time_stamp
        )  # (N, )

        bincount_2d = torch.bincount(
            x, minlength=self.num_classes * self.num_classes * self.time_len
        )

        conf = bincount_2d.view((self.time_len, self.num_classes, self.num_classes))
        # bincount returns int64; cast explicitly so the in-place add does not
        # fail PyTorch's int64 -> int32 in-place type-promotion check.
        self.conf += conf.to(self.conf.dtype)

    @classmethod
    def combine_metric(cls, metric_list: List[TimeSequenceConfusionMatrix], device):
        """Sum a list of time-sequence confusion matrices on ``device``."""
        if not metric_list:
            # Fixed: the constructor requires both time_len and num_classes;
            # the original call omitted num_classes and raised TypeError.
            return TimeSequenceConfusionMatrix(1, 1, device=device)
        dummy_metric = TimeSequenceConfusionMatrix(
            metric_list[0].time_len, metric_list[0].num_classes, device
        )
        for cur_metric in metric_list:
            dummy_metric.combine_other(cur_metric)

        return dummy_metric

    def value(self):
        """
        Returns:
            Confustion matrix of K rows and K columns, where rows corresponds
            to ground-truth targets and columns corresponds to predicted
            targets.
        """

        return self.conf


class BasicRegressionEvaluator(Evaluator):
    """Evaluator tracking the per-timestep, per-channel squared L2 error.

    Wraps an ``L2NormStatMatrix`` and exposes reset/add/metric-dict plus a
    DDP all-gather that merges statistics from every worker.
    """

    def __init__(self, time_length, chn_num, device) -> None:
        super().__init__()
        self.time_length = time_length
        self.chn_num = chn_num
        self.device = device
        self.l2_norm_matrix = L2NormStatMatrix(time_length, chn_num, device)

    def reset(self):
        """Drop all accumulated statistics."""
        self.l2_norm_matrix.reset()

    def add(self, prediction, label):
        """Fold one batch of (prediction, label) pairs into the statistics."""
        self.l2_norm_matrix.add_samples(prediction, label)

    def get_metric_dict(self):
        """Return mean/variance/count plus the negated selection criteria."""
        stat_matrix, cnt_tensor = self.l2_norm_matrix.value()
        sample_count = int(cnt_tensor)
        if sample_count == 0:
            # Nothing accumulated yet: report infinite error.
            return {
                "mean": torch.inf,
                "variance": torch.inf,
                "count": 0,
                "criteria": [torch.inf, torch.inf],
            }

        mean_matrix = stat_matrix[0]  # (T, C)
        var_matrix = stat_matrix[1]  # (T, C)

        return {
            "mean": mean_matrix,
            "variance": var_matrix,
            "count": sample_count,
            # Criteria are negated so that larger is better.
            "criteria": [
                -float(torch.mean(mean_matrix)),
                -float(torch.mean(var_matrix)),
            ],
        }

    def all_gather_ddp(self):
        """Gather stats from every DDP worker and return a merged evaluator."""
        stat_tensor, cnt_tensor = self.l2_norm_matrix.value()
        world_size = dist.get_world_size()

        gathered_stats = [
            torch.zeros(stat_tensor.shape, dtype=stat_tensor.dtype).to(self.device)
            for _ in range(world_size)
        ]
        gathered_cnts = [
            torch.zeros(cnt_tensor.shape, dtype=cnt_tensor.dtype).to(self.device)
            for _ in range(world_size)
        ]
        dist.all_gather(gathered_stats, stat_tensor)
        dist.all_gather(gathered_cnts, cnt_tensor)

        per_rank_metrics = [
            L2NormStatMatrix.from_tensor(stats, cnts, self.device)
            for stats, cnts in zip(gathered_stats, gathered_cnts)
        ]
        merged = L2NormStatMatrix.combine_metric(per_rank_metrics, device=self.device)

        merged_evaluator = BasicRegressionEvaluator(
            self.time_length, self.chn_num, self.device
        )
        merged_evaluator.l2_norm_matrix = merged
        return merged_evaluator


class BasicClassificationEvaluator(Evaluator):
    """Computes the intersection over union (IoU) per class and corresponding
    mean (mIoU).

    Intersection over union (IoU) is a common evaluation metric for semantic
    segmentation. The predictions are first accumulated in a confusion matrix
    and the IoU is computed from it as follows:

        IoU = true_positive / (true_positive + false_positive + false_negative).

    Keyword arguments:
    - num_classes (int): number of classes in the classification problem
    - ignore_index (int or iterable, optional): Index of the classes to ignore
    when computing the IoU. Can be an int, or any iterable of ints.
    - cm_device (str or int or torch.device): device for the confusion matrix.
    """

    def __init__(
        self,
        num_classes,
        ignore_index=None,
        cm_device="cpu",
    ):
        super().__init__()
        self.num_classes = num_classes
        self.device = cm_device
        self.conf_metric = ConfusionMatrix(
            num_classes,
            device=cm_device,
        )
        # Normalize ignore_index to a tuple. Fixed: the original stored None
        # here, which made get_metric_dict crash iterating it; an empty tuple
        # (as in TimeSequenceClassificationEvaluator) is always iterable.
        if ignore_index is None:
            self.ignore_index = tuple()
        elif isinstance(ignore_index, int):
            self.ignore_index = (ignore_index,)
        else:
            try:
                self.ignore_index = tuple(ignore_index)
            except TypeError:
                raise ValueError("'ignore_index' must be an int or iterable")

    def reset(self):
        """Drop all accumulated counts."""
        self.conf_metric.reset()

    def add(self, predicted, target):
        """Adds the predicted and target pair to the IoU metric.

        Keyword arguments:
        - predicted (Tensor): Can be a (N, K, H, W) tensor of
        predicted scores obtained from the model for N examples and K classes,
        or (N, H, W) tensor of integer values between 0 and K-1.
        - target (Tensor): Can be a (N, K, H, W) tensor of
        target scores for N examples and K classes, or (N, H, W) tensor of
        integer values between 0 and K-1.

        """

        self.conf_metric.add_samples(predicted.view(-1), target.view(-1))

    def get_metric_dict(self):
        """Return mIoU, macro precision/recall, product of per-class F1
        scores (under the "f1_score" key), and the selection criteria.
        """
        conf_matrix = self.conf_metric.value()
        if torch.sum(conf_matrix) == 0:
            init_dict = {
                "confusion_matrix": conf_matrix,
                "miou": 0,
                "precision": 0,
                "recall": 0,
                "f1_score": 0,
                "criteria": [0],
            }
            # Fixed: the original loop indexed 1..len(class_of_interests) and
            # raised IndexError on the last step; iterate the names directly.
            # NOTE(review): the "f1_socre" key spelling is kept verbatim —
            # confirm with consumers before renaming it.
            for class_name in config.class_of_interests:
                init_dict[f"f1_socre+{class_name}"] = 0
            return init_dict

        # Work on a copy so zeroing ignored rows does not corrupt the
        # accumulated confusion matrix (the time-sequence evaluator already
        # behaves this way because it reduces into a fresh tensor).
        conf_matrix = conf_matrix.clone()
        for ignore_idx in self.ignore_index:
            conf_matrix[ignore_idx, :] = 0
        valid_idx = (
            torch.sum(conf_matrix, dim=1) != 0
        )  # (True Positive + False Negative) cannot be 0

        true_positive = torch.diag(conf_matrix)  # (C,)
        false_positive = torch.sum(conf_matrix, 0) - true_positive  # (C,)
        false_negative = torch.sum(conf_matrix, 1) - true_positive  # (C,)

        true_positive = true_positive[valid_idx]
        false_positive = false_positive[valid_idx]
        false_negative = false_negative[valid_idx]

        iou = true_positive / (true_positive + false_positive + false_negative)  # (C,)
        miou = float(torch.mean(iou))
        precision = true_positive / (true_positive + false_positive)
        # A class that is never predicted gets precision 1 by convention.
        precision[torch.isnan(precision)] = 1
        avg_precision = float(torch.mean(precision))
        recall = true_positive / (true_positive + false_negative)
        avg_recall = float(torch.mean(recall))
        f1_score = 2 * precision * recall / (precision + recall)
        # Product (not mean) of the per-class F1 scores.
        avg_f1_score = float(torch.prod(f1_score))

        metric_dict = {
            "confusion_matrix": conf_matrix,
            "miou": miou,
            "precision": avg_precision,
            "recall": avg_recall,
            "f1_score": avg_f1_score,
            "criteria": [-avg_f1_score],
        }

        return metric_dict

    def all_gather_ddp(self) -> BasicClassificationEvaluator:
        """Gather confusion matrices from every DDP worker and merge them."""
        confusion_mat = self.conf_metric.value()
        confusion_mat_list = [
            torch.zeros(confusion_mat.shape, dtype=confusion_mat.dtype).to(self.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(confusion_mat_list, confusion_mat)
        metric_list = [
            ConfusionMatrix.from_tensor(cur_mat, self.device)
            for cur_mat in confusion_mat_list
        ]
        result_metric = ConfusionMatrix.combine_metric(metric_list, device=self.device)
        dummy_evaluator = BasicClassificationEvaluator(
            self.num_classes, self.ignore_index, self.device
        )
        dummy_evaluator.conf_metric = result_metric
        return dummy_evaluator


class TimeSequenceClassificationEvaluator(Evaluator):
    """
    Accumulate Time Sequence Confusion Matirx and calculate MIOU,
    AVG_PRECISION, AVG_RECALL and PROD_F1_SCORE.

    The confusion matrix will be size of (time_len, num_class, num_class),
    representing the time index dimension, the ground truth dimension, and
    prediction dimension respectively.

    Keyword arguments:
    - time_len (int): maximum time length of the data
    - num_classes (int): number of classes in the classification problem
    - ignore_index (int or iterable, optional): Index of the classes to ignore
    when computing the IoU. Can be an int, or any iterable of ints.
    """

    def __init__(
        self,
        time_len,
        num_classes,
        ignore_index=None,
        cm_device="cpu",
    ):
        super().__init__()
        self.time_len = time_len
        self.num_classes = num_classes
        self.device = cm_device
        self.conf_metric = TimeSequenceConfusionMatrix(
            time_len,
            num_classes,
            device=cm_device,
        )
        # Normalize ignore_index to a tuple of ints.
        if ignore_index is None:
            self.ignore_index = tuple()
        elif isinstance(ignore_index, int):
            self.ignore_index = (ignore_index,)
        else:
            try:
                self.ignore_index = tuple(ignore_index)
            except TypeError:
                raise ValueError("'ignore_index' must be an int or iterable")
        # Fixed: validate the normalized tuple. The original called
        # max(ignore_index) on the raw argument, which raised TypeError
        # whenever a plain int was passed.
        if self.ignore_index:
            assert (
                max(self.ignore_index) < num_classes
            ), "Ignore index should be less than num_classes"

    def reset(self):
        """Drop all accumulated counts."""
        self.conf_metric.reset()

    def add(self, predicted, target, timestamp):
        """Adds the predicted and target pair to the IoU metric.

        Keyword arguments:
        - predicted (Tensor): (N, C) or (N,)
        predicted scores obtained from the model for N examples and K classes,
        or (N, H, W) tensor of integer values between 0 and K-1.
        - target (Tensor): (N, C) or (N,)
        - timestamp (Tensor): (N,)

        """

        self.conf_metric.add_samples(predicted, target, timestamp)

    def get_metric_dict(self):
        """Return mIoU, macro precision/recall, product of per-class F1
        scores, the per-class F1 tensor, and the selection criteria.
        """
        ori_conf_matrix = self.conf_metric.value()  # (T, C, C)
        # Reduce over time into a fresh (C, C) matrix; the accumulated
        # (T, C, C) tensor itself is never mutated below.
        conf_matrix = torch.sum(ori_conf_matrix, dim=0)
        if torch.sum(conf_matrix) == 0:
            return {
                "confusion_matrix": conf_matrix,
                "miou": 0,
                "precision": 0,
                "recall": 0,
                "f1_score": 0,
                "crop_f1_score": [0],
                "criteria": [0],
            }

        # Drop rows for ignored classes so they contribute nothing.
        for ignore_idx in self.ignore_index:
            conf_matrix[ignore_idx, :] = 0
        valid_idx = torch.sum(conf_matrix, dim=1) != 0

        true_positive = torch.diag(conf_matrix)  # (C,)
        false_positive = torch.sum(conf_matrix, 0) - true_positive  # (C,)
        false_negative = torch.sum(conf_matrix, 1) - true_positive  # (C,)

        true_positive = true_positive[valid_idx]
        false_positive = false_positive[valid_idx]
        false_negative = false_negative[valid_idx]

        iou = true_positive / (true_positive + false_positive + false_negative)  # (C,)
        miou = float(torch.mean(iou))
        precision = true_positive / (true_positive + false_positive)
        # A class that is never predicted gets precision 1 by convention.
        precision[torch.isnan(precision)] = 1
        avg_precision = float(torch.mean(precision))
        recall = true_positive / (true_positive + false_negative)
        avg_recall = float(torch.mean(recall))
        f1_score = 2 * precision * recall / (precision + recall)
        # Product (not mean) of the per-class F1 scores.
        avg_f1_score = float(torch.prod(f1_score))

        metric_dict = {
            "confusion_matrix": ori_conf_matrix,
            "miou": miou,
            "precision": avg_precision,
            "recall": avg_recall,
            "f1_score": avg_f1_score,
            # NOTE(review): a tensor here but a plain [0] in the empty branch —
            # consumers should handle both; confirm before unifying.
            "crop_f1_score": f1_score,
            "criteria": [-avg_f1_score],
        }

        return metric_dict

    def all_gather_ddp(self) -> TimeSequenceClassificationEvaluator:
        """Gather confusion matrices from every DDP worker and merge them."""
        confusion_mat = self.conf_metric.value()
        confusion_mat_list = [
            torch.zeros(confusion_mat.shape, dtype=confusion_mat.dtype).to(self.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(confusion_mat_list, confusion_mat)
        metric_list = [
            TimeSequenceConfusionMatrix.from_tensor(cur_mat, self.device)
            for cur_mat in confusion_mat_list
        ]
        result_metric = TimeSequenceConfusionMatrix.combine_metric(
            metric_list, device=self.device
        )
        dummy_evaluator = TimeSequenceClassificationEvaluator(
            self.time_len, self.num_classes, self.ignore_index, self.device
        )
        dummy_evaluator.conf_metric = result_metric
        return dummy_evaluator


if __name__ == "__main__":
    # Quick smoke test: three samples placed at time stamps 0, 1 and 3.
    evaluator = TimeSequenceClassificationEvaluator(5, 5)
    prediction = torch.LongTensor([1, 2, 3])
    label = torch.LongTensor([3, 2, 1])
    timestamps = torch.LongTensor([0, 1, 3])
    evaluator.add(prediction, label, timestamps)

    conf_mat = evaluator.get_metric_dict()["confusion_matrix"]
    # Fixed: the original called print() with no argument, emitting only a
    # blank line; show the accumulated (T, C, C) confusion matrix instead.
    print(conf_mat)
