"""
Collection of functions which enable the evaluation of a classifier's performance,
by showing confusion matrix, accuracy, recall, precision etc.
"""

import numpy as np
import sys

import matplotlib.pyplot as plt

from sklearn import metrics
from tabulate import tabulate
import math
import logging
from datetime import datetime

import torch
from sklearn.metrics import mean_squared_error, accuracy_score


def RMSE(y_true, y_pred,  squared=False):
    '''
    Root-mean-squared error (or MSE) between ground truth and predictions.

    Computed directly with numpy instead of calling
    ``sklearn.metrics.mean_squared_error(..., squared=...)``: the ``squared``
    keyword was deprecated in scikit-learn 1.4 and removed in 1.6, so the old
    call raises TypeError on current scikit-learn releases.

    :param y_true: array-like of shape (n_samples,) or (n_samples, n_outputs), Ground truth values
    :param y_pred: array-like of shape (n_samples,) or (n_samples, n_outputs), Estimated target values
    :param squared: If True returns MSE value, if False returns RMSE value
    :return: loss (float)
    '''
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    # Per-output MSE over the sample axis; matches sklearn's default
    # multioutput='uniform_average' behavior, where sqrt is applied per
    # output *before* averaging across outputs.
    per_output = np.mean((y_true - y_pred) ** 2, axis=0)
    if not squared:
        per_output = np.sqrt(per_output)
    return float(np.mean(per_output))

def Accuracy_Score(y_true, y_pred, normalize=True):
    '''
    Classification accuracy, delegated to ``sklearn.metrics.accuracy_score``.

    :param y_true: 1d array-like, or label indicator array / sparse matrix, Ground truth labels
    :param y_pred: 1d array-like, or label indicator array / sparse matrix, Predicted labels
    :param normalize: when True (default) return the fraction of correctly
        classified samples; when False return their absolute count
    :return: accuracy score (fraction, or count when ``normalize`` is False)
    '''
    score = accuracy_score(y_true, y_pred, normalize=normalize)
    return score
def type_transform(y_true, y_pred):
    """Normalize ground-truth and prediction inputs to numpy-friendly values.

    Any ``torch.Tensor`` argument is moved to CPU, detached from the autograd
    graph and converted to a numpy array; every other input is passed through
    unchanged.

    :return: (ground_truth, prediction) tuple after conversion
    """
    def _as_numpy(value):
        # .cpu() makes the conversion safe for GPU tensors; .detach() drops
        # any autograd history before the numpy conversion.
        if torch.is_tensor(value):
            return value.cpu().detach().numpy()
        return value

    return _as_numpy(y_true), _as_numpy(y_pred)

def prob_transform(task, prob):
    '''
    Turn raw model output into the value the metric functions expect.

    :param task: either "regression" or "classification"
    :param prob: raw model output; for classification this is expected to be
        a 2-d tensor of class scores of shape (n_samples, n_classes) --
        argmax is taken over dim=1 to get predicted class indices
    :return: ``prob`` unchanged for regression, predicted class indices for
        classification
    :raises ValueError: if ``task`` is not a supported task name (the old
        code silently returned None here, deferring the failure downstream)
    '''
    if task == "regression":
        return prob
    if task == "classification":
        return torch.argmax(prob, dim=1)
    raise ValueError(f"Unsupported task {task!r}; expected 'regression' or 'classification'")

class AnalyzerMetric():
    """Compute and cache the headline metric for a fixed task.

    Supported tasks: 'regression' (scored with RMSE) and 'classification'
    (scored with accuracy). Each computed value is cached in ``self.metrics``
    under its metric name so it can be inspected after the call.
    """

    def __init__(self, task):
        """
        :param task: either 'regression' or 'classification'
        """
        self.task = task
        self.metrics = {}  # metric name -> last computed value

    def get_metrics(self, y_true, y_pred):
        """Compute the metric for this analyzer's task.

        :param y_true: ground-truth values/labels (torch tensor or array-like)
        :param y_pred: raw model output; for classification a
            (n_samples, n_classes) score tensor -- argmax is applied before
            scoring
        :return: RMSE for 'regression', accuracy for 'classification'
        :raises ValueError: for an unrecognized task (the old code silently
            fell through and returned None)
        """
        if self.task not in ('regression', 'classification'):
            raise ValueError(f"Unsupported task {self.task!r}")
        y_pred = prob_transform(self.task, y_pred)
        gt, pred = type_transform(y_true, y_pred)
        if self.task == 'regression':
            self.metrics['RMSE'] = RMSE(gt, pred)
            return self.metrics['RMSE']
        self.metrics['Accuracy'] = Accuracy_Score(gt, pred)
        return self.metrics['Accuracy']



