import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm

import wandb
from utils.dice_score import multiclass_dice_coeff, dice_coeff
from utils.utils import specificityCalc
from sklearn.metrics import roc_auc_score, accuracy_score, recall_score
import numpy as np


def evaluate(net, dataloader, device):
    """Run one validation pass and return the mean Dice score.

    Args:
        net: segmentation network; must expose an ``n_classes`` attribute.
        dataloader: yields dicts with 'image' and 'mask' tensors.
            # assumes masks are stored as 0/255 images — TODO confirm with dataset
        device: torch device to run inference on.

    Returns:
        Dice score averaged over all validation batches (0 if the loader
        is empty).
    """
    net.eval()
    num_val_batches = len(dataloader)
    dice_score = 0

    # iterate over the validation set
    for batch in tqdm(dataloader, total=num_val_batches, desc='Validation round', unit='batch', leave=False):
        image, masks_true = batch['image'], batch['mask']
        # move images and labels to correct device and type
        image = image.to(device=device, dtype=torch.float32)

        with torch.no_grad():
            # predict the mask
            mask_pred = net(image)

            # scale 0/255 masks down to class indices for one-hot encoding
            mask_true = (masks_true / 255).to(device=device, dtype=torch.long)
            mask_true = F.one_hot(mask_true, net.n_classes).permute(0, 3, 1, 2).float()
            # convert to one-hot format
            if net.n_classes == 1:
                # torch.sigmoid replaces the deprecated F.sigmoid
                mask_pred = (torch.sigmoid(mask_pred) > 0.5).float()
                # compute the Dice score
                dice_score += dice_coeff(mask_pred, mask_true, reduce_batch_first=False)
            else:
                mask_pred = F.one_hot(mask_pred.argmax(dim=1), net.n_classes).permute(0, 3, 1, 2).float()
                # compute the Dice score, ignoring background
                dice_score += multiclass_dice_coeff(mask_pred[:, 1:, ...], mask_true[:, 1:, ...],
                                                    reduce_batch_first=False)

    net.train()

    # Fixes a potential division by zero error
    if num_val_batches == 0:
        return dice_score
    # dice_coeff(..., reduce_batch_first=False) already averages over the
    # batch dimension, so divide only by the number of batches.  The old
    # extra "/ len(mask_pred)" divided by the LAST batch's size a second
    # time, which skewed the reported score.
    return dice_score / num_val_batches


def evaluate_classification(net, dataloader, device):
    """Validate a joint segmentation + classification network.

    Args:
        net: network returning ``(masks_pred, class_pred)`` where
            ``masks_pred`` is indexable per mask channel; must expose
            ``n_classes``.
        dataloader: yields dicts with 'image', 'mask' and 'class' entries.
        device: torch device to run inference on.

    Returns:
        ``(prob_all, label_all, mean_dice, acc, auc, sensitivity,
        specificity)`` — or just the (zero) dice score when the loader
        is empty.
    """
    net.eval()
    num_val_batches = len(dataloader)
    dice_score = 0
    prob_all = []
    label_all = []

    # iterate over the validation set
    for batch in tqdm(dataloader, total=num_val_batches, desc='Validation round', unit='batch', leave=False):
        image, masks_true, labels = batch['image'], batch['mask'], batch['class']
        # move images and labels to correct device and type
        image = image.to(device=device, dtype=torch.float32)
        # scale 0/255 masks down to class indices for one-hot encoding
        masks_true = (masks_true / 255).to(device=device, dtype=torch.long)

        with torch.no_grad():
            # predict the masks and the class logits
            masks_pred, class_pred = net(image)

            # one Dice contribution per predicted mask channel
            for channel_index in range(len(masks_pred)):
                mask_pred = masks_pred[channel_index]
                mask_true = masks_true[:, :, :, channel_index]
                mask_true = F.one_hot(mask_true, net.n_classes).permute(0, 3, 1, 2).float()
                # convert to one-hot format
                if net.n_classes == 1:
                    # torch.sigmoid replaces the deprecated F.sigmoid
                    mask_pred = (torch.sigmoid(mask_pred) > 0.5).float()
                    # compute the Dice score
                    dice_score += dice_coeff(mask_pred, mask_true, reduce_batch_first=False)
                else:
                    mask_pred = F.one_hot(mask_pred.argmax(dim=1), net.n_classes).permute(0, 3, 1, 2).float()
                    # compute the Dice score, ignoring background
                    dice_score += multiclass_dice_coeff(mask_pred[:, 1:, ...], mask_true[:, 1:, ...],
                                                        reduce_batch_first=False)

            outputs = F.softmax(class_pred, dim=1)
            prob_all.extend(outputs.cpu().numpy())
            label_all.extend(labels.cpu().numpy())

    net.train()

    # Guard BEFORE touching masks_pred or the metrics: with an empty
    # loader masks_pred is unbound (NameError) and the sklearn metrics
    # would raise on empty inputs.
    if num_val_batches == 0:
        return dice_score

    # average over mask channels (dice was summed per channel per batch)
    dice_score = dice_score / len(masks_pred)
    # hoist the repeated argmax: same predictions feed all three metrics
    preds = np.argmax(prob_all, axis=1)
    acc = accuracy_score(label_all, preds)
    auc = roc_auc_score(label_all, prob_all, multi_class="ovr", average="macro")
    sensitivity = recall_score(label_all, preds, average="macro")
    specificity = specificityCalc(preds, label_all)

    return prob_all, label_all, dice_score / num_val_batches, acc, auc, sensitivity, specificity


def evaluate_all_folders(prob_all, label_all, dice, experiment, step):
    """Log pooled cross-fold metrics, a results table, and plots to wandb.

    Args:
        prob_all: per-sample class probabilities, shape ``[N, 3]``.
        label_all: per-sample integer ground-truth labels, length ``N``.
        dice: accumulated dice score.
            # NOTE(review): divided by 10 below — presumably 10 folds; confirm with caller
        experiment: wandb run whose ``summary`` is updated in place.
        step: global step forwarded to ``wandb.log``.
    """
    class_names = ["NOR", "HCM", "HHD"]
    predictions = np.argmax(prob_all, axis=1)

    # classification metrics over the pooled folds
    acc = accuracy_score(label_all, predictions)
    auc_macro = roc_auc_score(label_all, prob_all, multi_class="ovr", average="macro")
    auc_weighted = roc_auc_score(label_all, prob_all, multi_class="ovr", average="weighted")
    sensitivity = recall_score(label_all, predictions, average="macro")
    specificity = specificityCalc(predictions, label_all)

    experiment.summary["test_accuracy"] = acc
    experiment.summary['auc_macro'] = auc_macro
    experiment.summary['auc_weighted'] = auc_weighted
    experiment.summary['sensitivity'] = sensitivity
    experiment.summary['specificity'] = specificity
    experiment.summary['dice'] = dice / 10

    # one table row per sample: the three class probabilities plus the label
    prob_arr = np.array(prob_all)
    label_col = np.array(label_all)[:, np.newaxis]
    table_rows = np.concatenate((prob_arr, label_col), axis=1)
    results_table = wandb.Table(data=table_rows, columns=class_names + ["label"])
    wandb.log({
        "results": results_table,
        "roc_curve": wandb.plot.roc_curve(label_all, prob_all, labels=class_names, classes_to_plot=None),
        "conf_mat": wandb.plot.confusion_matrix(probs=prob_arr, y_true=np.array(label_all),
                                                class_names=class_names)
    }, step=step)

