import torch
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report, precision_score, recall_score, f1_score
from sklearn.metrics import roc_auc_score


def draw_confusion_matrix(train_set, train_preds, classes_names):
    """Build and pretty-print a confusion matrix with per-class metrics.

    Args:
        train_set: 1-D tensor of ground-truth class indices.
        train_preds: 1-D tensor of predicted class indices, same length.
        classes_names: list of class display names; its length determines
            the matrix size, so every label must be < len(classes_names).

    Returns:
        Tuple (sensitivity_list, specificity_list, cmt):
        - sensitivity_list / specificity_list: one 0-dim float tensor per class.
        - cmt: (C, C) integer confusion matrix, rows = truth, cols = prediction.
    """
    num_classes = len(classes_names)
    # int64 instead of the original int16: any cell exceeding 32767 samples
    # silently overflowed and corrupted every derived metric.
    cmt = torch.zeros(num_classes, num_classes, dtype=torch.int64)
    for true_label, pred_label in zip(train_set.int().tolist(), train_preds.int().tolist()):
        cmt[true_label, pred_label] += 1

    # Total correctly classified samples = trace of the matrix.
    correct_count = cmt.diagonal().sum()
    sensitivity_list = []
    specificity_list = []
    for j in range(num_classes):
        column = cmt[:, j]
        tp = cmt[j][j]
        # NOTE(review): "tn" here is trace - tp (other classes' correct hits),
        # not the textbook true-negative count. Preserved as-is because it is
        # the metric this project reports — confirm the definition is intended.
        tn = correct_count - tp
        # true_divide yields nan for a class with zero support (0/0), same
        # as the original behavior.
        sensitivity = torch.true_divide(tp, torch.sum(cmt[j]))
        specificity = torch.true_divide(tn, (tn + torch.sum(column) - tp))
        sensitivity_list.append(sensitivity)
        specificity_list.append(specificity)

    # Header row: 11-char blank label column, then one 5-char cell per class.
    print('{:>11}'.format(''), end=' ')
    for class_name in classes_names:
        print('{:<5}'.format(class_name), end=' ')
    print()
    for i, class_name in enumerate(classes_names):
        print('{:>11}'.format(class_name), end=' ')
        for j in range(num_classes):
            print('{:<5}'.format(cmt[i][j].item()), end=' ')
        print()
    print('Sensitivity', end=' ')
    for sens in sensitivity_list:
        print('%.3f' % sens.item(), end=' ')
    print()
    print('Specificity', end=' ')
    for spec in specificity_list:
        print('%.3f' % spec.item(), end=' ')
    print()
    print('Average sensitivity:', torch.sum(torch.tensor(sensitivity_list)) / len(sensitivity_list))
    print('Average specificity:', torch.sum(torch.tensor(specificity_list)) / len(specificity_list))
    return sensitivity_list, specificity_list, cmt


def draw_confusion_matrix_topk(train_set, logits, classes_names, k=2):
    """Top-k confusion matrix: a sample counts as correct when its true label
    is among the k highest-scoring classes of its logit vector.

    Also prints the matrix, per-class sensitivity/specificity, and an
    sklearn classification report over the derived top-k predictions.

    Args:
        train_set: 1-D tensor of ground-truth class indices.
        logits: iterable of per-sample score vectors (one row per sample).
        classes_names: list of class display names.
        k: number of top-scoring classes that count as a hit (default 2).

    Returns:
        Tuple (sensitivity_list, specificity_list, cmt), same shapes and
        semantics as draw_confusion_matrix.
    """
    num_classes = len(classes_names)
    # int64 instead of the original int16 to avoid silent overflow past 32767.
    cmt = torch.zeros(num_classes, num_classes, dtype=torch.int64)
    new_preds = []
    for i, logit in enumerate(logits):
        _, indexes = torch.topk(logit, k, dim=0)
        label = train_set[i].int()
        if label in indexes:
            # True label inside the top-k: count it on the diagonal.
            cmt[label, label] += 1
            new_preds.append(label)
        else:
            # Miss: charge the error to the top-1 prediction.
            cmt[label, indexes[0]] += 1
            new_preds.append(indexes[0])
    correct_count = cmt.diagonal().sum()
    sensitivity_list = []
    specificity_list = []
    for j in range(num_classes):
        column = cmt[:, j]
        tp = cmt[j][j]
        # NOTE(review): "tn" is trace - tp, not the textbook true-negative
        # count — preserved from the original metric definition.
        tn = correct_count - tp
        sensitivity = torch.true_divide(tp, torch.sum(cmt[j]))
        specificity = torch.true_divide(tn, (tn + torch.sum(column) - tp))
        sensitivity_list.append(sensitivity)
        specificity_list.append(specificity)

    # Header row: "Top-k" label column, then one 5-char cell per class.
    print('{:>11}'.format('Top-%i' % k), end=' ')
    for class_name in classes_names:
        print('{:<5}'.format(class_name), end=' ')
    print()
    for i, class_name in enumerate(classes_names):
        print('{:>11}'.format(class_name), end=' ')
        for j in range(num_classes):
            print('{:<5}'.format(cmt[i][j].item()), end=' ')
        print()
    print('Sensitivity', end=' ')
    for sens in sensitivity_list:
        print('%.3f' % sens.item(), end=' ')
    print()
    print('Specificity', end=' ')
    for spec in specificity_list:
        print('%.3f' % spec.item(), end=' ')
    print()
    print('Average sensitivity:', torch.sum(torch.tensor(sensitivity_list)) / len(sensitivity_list))
    print('Average specificity:', torch.sum(torch.tensor(specificity_list)) / len(specificity_list))

    # Fixed: get_classification_report takes (truths, preds); the old
    # three-argument call raised TypeError at runtime.
    avg_precision, avg_recall, avg_f1, weighted_f1 = get_classification_report(
        train_set, torch.tensor(new_preds))
    return sensitivity_list, specificity_list, cmt


def get_classification_report(truths, preds, classes_names=None):
    """Print a confusion matrix and classification report; return averaged metrics.

    Args:
        truths: ground-truth labels (array-like or 1-D tensor).
        preds: predicted labels, same length as truths.
        classes_names: optional display names forwarded to sklearn's
            classification_report as target_names. Added with a default of
            None (sklearn's own default) so existing two-argument callers are
            unaffected, while the three-argument call elsewhere in this file
            now works.

    Returns:
        Tuple (avg_precision, avg_recall, avg_f1, weighted_f1): the first
        three are macro-averaged; the last is support-weighted F1.
    """
    cmt = confusion_matrix(truths, preds)
    print(cmt)
    report = classification_report(truths, preds, target_names=classes_names)
    print(report)
    avg_precision = precision_score(truths, preds, average='macro')
    avg_recall = recall_score(truths, preds, average='macro')
    avg_f1 = f1_score(truths, preds, average='macro')
    weighted_f1 = f1_score(truths, preds, average='weighted')
    return avg_precision, avg_recall, avg_f1, weighted_f1


def get_auc(truths, outputs):
    """Compute, print, and return the one-vs-rest multi-class ROC-AUC score.

    Args:
        truths: ground-truth labels.
        outputs: per-class probability/score matrix for each sample.

    Returns:
        The ROC-AUC value from sklearn's roc_auc_score (multi_class='ovr').
    """
    score = roc_auc_score(truths, outputs, multi_class='ovr')
    print('---AUC:', score)
    return score
