from __future__ import annotations

from dataclasses import dataclass
from dataclasses_json import dataclass_json
from tabulate import tabulate
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, confusion_matrix


@dataclass_json
@dataclass
class MetricsReport:
    """Container for classification metrics plus a confusion matrix.

    Built from ground-truth and predicted label sequences via
    ``by_sequence``; renders as a grid table via ``str()`` or as a
    one-line summary via ``detail_report``.
    """

    accuracy: float
    recall: float
    precision: float
    f1_score: float
    # shape (n_labels, n_labels) as returned by sklearn.metrics.confusion_matrix
    # NOTE(review): an ndarray is likely not JSON-serializable by
    # dataclasses_json out of the box — confirm before relying on to_json().
    confusion_matrix: np.ndarray

    def __str__(self) -> str:
        """Render the four scalar metrics as a single-row grid table."""
        header = ['acc', 'recall', 'precision', 'f1score']
        table = [[self.accuracy, self.recall, self.precision, self.f1_score]]
        return tabulate(table, header, tablefmt='grid')

    def detail_report(self) -> str:
        """Return a one-line summary including TN/FP/FN/TP counts.

        Assumes a binary (2x2) confusion matrix: ``ravel()`` of a larger
        matrix would raise on the 4-way unpacking below.
        """
        tn, fp, fn, tp = self.confusion_matrix.ravel()
        # Counts are integers, so format with {:d}; the previous {:.1f}
        # rendered e.g. "5.0" for a count of 5.
        return "accuracy<{:.4f}>, recall<{:.4f}>, precision<{:.4f}> f1_score<{:.4f}> confusion_matrix<TN<{:d}>, FP<{:d}>, FN<{:d}>, TP<{:d}>>".format(
            self.accuracy, self.recall, self.precision, self.f1_score, tn, fp, fn, tp
        )

    @staticmethod
    def by_sequence(truth, predicted, labels=None) -> MetricsReport:
        """Compute micro-averaged metrics from label sequences.

        Args:
            truth: ground-truth labels.
            predicted: predicted labels, aligned with ``truth``.
            labels: optional explicit label ordering passed through to the
                sklearn metric functions; ``None`` lets sklearn infer the
                label set (sklearn's own default).

        Returns:
            MetricsReport: micro-averaged scores and the confusion matrix.
        """
        return MetricsReport(
            accuracy=accuracy_score(truth, predicted),
            recall=recall_score(truth, predicted, labels=labels, average='micro'),
            precision=precision_score(truth, predicted, labels=labels, average='micro'),
            f1_score=f1_score(truth, predicted, labels=labels, average='micro'),
            confusion_matrix=confusion_matrix(truth, predicted, labels=labels),
        )
