from torchvision import transforms
import os
os.getcwd()
from torch.utils.data import DataLoader
from model.vgg_fp2a_3cls import vgg16_bn as fp2a
from model.vgg_3cls import vgg16_bn
from metric import *
from sklearn import metrics
from torchvision import datasets, transforms
from sklearn.metrics import confusion_matrix
from args import get_args, test_kwargs
from torch.autograd import Variable
import torch.nn as nn
import os
os.getcwd()
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
from model.swin_transformer import swin_tiny_patch4_window7_224 as create_swin


def val(model, val_loader):
    val_acc = []
    model.eval()
    count = 0
    val_running_loss = 0.0
    for images, labels, f in val_loader:
        images, labels = Variable(images.to(device)), Variable(labels.to(device))
        output = model(images)
        criterion = nn.CrossEntropyLoss().to(device)
        loss = criterion(output, labels)
        # val_acc.append(accuracy(output, labels))
        val_running_loss += loss.item()
        count += 1
    mean_val_loss = val_running_loss / count
    img = images.detach()
    # writer.add_image('val_image', img[0, :, :, :], global_step=epoch)
    # print(np.mean(val_acc), 'this is the val acc')


def test(model, device, _transforms, **kwargs):
    """Evaluate *model* on the ImageFolder test set and report metrics.

    Args:
        model: network already loaded and in eval mode (see ``load_model``).
        device: torch device the model lives on.
        _transforms: torchvision transform applied to each test image.
        **kwargs: must provide ``data_path``, ``batch_size``, ``num_class``
            and ``method`` (the model-family name: it selects the forward
            call shape and titles the confusion matrix).

    Side effects: prints overall accuracy, per-class accuracy, clinical
    metrics and a classification report, then renders and saves the
    confusion matrix via ``make_confusion_matrix``.
    """
    data_path = kwargs['data_path']
    bz = kwargs['batch_size']          # was ignored: loader hard-coded 16
    num_class = kwargs['num_class']
    # Mirrors the script-level `name` global (the caller sets
    # kwargs['method'] = name); reading it here removes the global coupling.
    name = kwargs.get('method', '')
    labelnumber = list(range(num_class))

    test_set = datasets.ImageFolder(root=data_path, transform=_transforms)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=bz,
                                              shuffle=False,
                                              num_workers=0)

    time_all_start = time.time()
    correct, total = 0, 0
    class_correct = [0.0] * num_class
    class_total = [0.0] * num_class
    allpre, alllabel = [], []
    with torch.no_grad():  # inference only
        for i, data in enumerate(test_loader):
            inputs, labels = data[0].to(device), data[1].to(device)
            # NOTE(review): a plain ImageFolder yields (image, label) pairs,
            # so data[-1] is the label batch; fp2a's PATH kwarg presumably
            # expects file paths from a custom dataset -- confirm upstream.
            path = data[-1]
            if name == "mmal":
                # MMAL returns several heads; only the raw logits are scored.
                _, _, _, _, _, raw_logits, _, _ = model(inputs, 199, i, 'test')
                outputs = raw_logits
            elif name == "fp2a":
                outputs = model(inputs, PATH=path)
            else:
                outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            allpre.extend(predicted.cpu().tolist())
            alllabel.extend(labels.cpu().tolist())
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            # Per-class tallies. The original accumulated these inside an
            # unreachable `else` branch and incremented class_total by 0.
            for lab, hit in zip(labels.tolist(), (predicted == labels).tolist()):
                class_correct[lab] += hit
                class_total[lab] += 1

    # Clinical metrics over the full test set. The original called this per
    # batch and omitted the class-count argument, which raised TypeError.
    sen, pre, spe, acc = clinical_evaluate(alllabel, allpre, num_class)
    print('sensitivity:', sen)
    print('precision:  ', pre)
    print('specificity:', spe)
    print('accuracy:   ', acc)

    time_all_end = time.time()
    # end - start: the original printed the negated elapsed time.
    print("all_image_times: ", time_all_end - time_all_start)
    print('Accuracy of the network on the images: %.2f %%' % (
            100 * correct / max(total, 1)))
    for i in range(len(labelnumber)):
        print('Accuracy of %5s : %.2f %%' % (
            labelnumber[i], 100 * class_correct[i] / (class_total[i] + 1e-9)))

    # target_names must be the same length as `labels`; the original
    # hard-coded 5 names, so sklearn raised whenever num_class != 5.
    target = ['class %d' % i for i in labelnumber]
    print(metrics.classification_report(alllabel, allpre, labels=labelnumber,
                                        digits=4, target_names=target))
    make_confusion_matrix(alllabel, allpre, labelnumber, method=kwargs['method'])


def make_confusion_matrix(y_true, y_pred, labels, normalize=False, vis=True, method='confusion matrix'):
    """Print (and optionally plot + save) the confusion matrix.

    Args:
        y_true: ground-truth labels.
        y_pred: predicted labels.
        labels: label values, in the row/column order to display.
        normalize: if True, convert counts to per-row (per-true-class) rates.
        vis: if True, render with matplotlib and save 'confusion_matrix.png'
            in the current working directory, then show the figure.
        method: figure title passed to plot_confusion_matrix.
    """
    # Minor-tick positions at cell boundaries (i + 0.5) for the cell grid.
    tick_marks = np.array(range(len(labels))) + 0.5
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    if normalize:
        np.set_printoptions(precision=2)
        # NOTE(review): divides by row sums -- a class absent from y_true
        # gives a zero row and a divide-by-zero warning; confirm inputs.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print(cm)
    if vis:
        plt.figure(figsize=(12, 8), dpi=120)
        ind_array = np.arange(len(labels))
        x, y = np.meshgrid(ind_array, ind_array)
        # Annotate every cell above the 0.01 threshold; use white text on
        # dark (high-count) cells, black otherwise.
        for x_val, y_val in zip(x.flatten(), y.flatten()):
            c = cm[y_val][x_val]
            if c > 0.01:
                plt.text(x_val, y_val, "%0.2f" % (c,), color='white' if c > cm.max()/2 else 'black', fontsize=10, va='center', ha='center')
        # Minor ticks at the +0.5 boundaries draw grid lines between cells.
        plt.gca().set_xticks(tick_marks, minor=True)
        plt.gca().set_yticks(tick_marks, minor=True)
        plt.gca().xaxis.set_ticks_position('none')
        plt.gca().yaxis.set_ticks_position('none')
        plt.grid(True, which='minor', linestyle='-')
        plt.gcf().subplots_adjust(bottom=0.15)
        plot_confusion_matrix(cm, labels,title=method)
        plt.savefig('confusion_matrix.png', format='png')
        plt.show()


def clinical_evaluate(Y_test, Y_pred, n=None):
    """Per-class clinical metrics from true/predicted label lists.

    Args:
        Y_test: iterable of ground-truth integer labels.
        Y_pred: iterable of predicted integer labels (same length).
        n: number of classes. Defaults to ``max(label) + 1`` when omitted
           (backward-compatible: existing two-argument calls now work).

    Returns:
        Four lists of length ``n``: sensitivity (recall), precision,
        specificity and accuracy, one entry per class. Any metric whose
        denominator is zero is reported as 0.0 instead of raising.
    """
    Y_test = list(Y_test)
    Y_pred = list(Y_pred)
    if n is None:
        n = max(Y_test + Y_pred) + 1
    # Build an explicit n x n matrix so classes absent from the data still
    # occupy a (zero) row/column. sklearn's confusion_matrix without a
    # `labels=` argument shrinks the matrix and mis-indexes class i >= its
    # size, which crashed the original loop.
    con_mat = np.zeros((n, n), dtype=np.int64)
    for true_lab, pred_lab in zip(Y_test, Y_pred):
        con_mat[true_lab][pred_lab] += 1
    number = con_mat.sum()

    sen, pre, spe, acc = [], [], [], []
    for i in range(n):
        tp = con_mat[i][i]
        fn = con_mat[i, :].sum() - tp
        fp = con_mat[:, i].sum() - tp
        tn = number - tp - fn - fp
        sen.append(tp / (tp + fn) if tp + fn else 0.0)
        pre.append(tp / (tp + fp) if tp + fp else 0.0)
        spe.append(tn / (tn + fp) if tn + fp else 0.0)
        acc.append((tp + tn) / number if number else 0.0)
    return sen, pre, spe, acc


def plot_confusion_matrix(cm, labels, title='Confusion Matrix', cmap=plt.cm.Blues):
    """Draw matrix *cm* as a heatmap on the current matplotlib figure.

    Tick marks carry *labels*; rows are true classes, columns predictions.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(labels))
    plt.xticks(ticks, labels, rotation=90)
    plt.yticks(ticks, labels)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


def load_model(model, state_dict, device):
    """Load *state_dict* into *model*, move it to *device*, switch to eval
    mode, and return the same model instance."""
    model.load_state_dict(state_dict)
    return model.to(device).eval()

from args import get_args, test_kwargs
from torchvision.models import resnet18
from model.vgg_p2psa_position1_3cls import vgg16_bn as position1
from model.vgg_p2psa_position2_3cls import vgg16_bn as position2
from model.vgg_p2psa_position3_3cls import vgg16_bn as position3
from model.vgg_p2psa_position4_3cls import vgg16_bn as position4
from model.vgg_p2psa_3cls import vgg16_bn as p2psa
from model.vgg_ml_cam_3cls_random import vgg16_bn as ml_cam
from model.mmal_net import MainNet
from model.config_mmal_net import num_classes, model_name, model_path, lr_milestones, lr_decay_rate, input_size, \
    root, end_epoch, save_interval, init_lr, batch_size, CUDA_VISIBLE_DEVICES, weight_decay, \
    proposalN, set, channels


def model_choose(name):
    """Build the network for *name* and load its trained checkpoint.

    Uses the module-level ``device`` and ``args`` globals (set in the
    ``__main__`` block). Raises ValueError for an unknown name instead of
    the original fall-through NameError.
    """
    # Families whose constructor takes no arguments: name -> (ctor, ckpt).
    simple = {
        'fp2a': (fp2a, './runs/train_ROI-3cls-in-random_vggfp2a/best_loss.pth'),
        'vgg': (vgg16_bn, './runs/train_ROI-3cls-in-random_vgg/best_acc_194.pth'),
        'p2psa': (p2psa, './runs/train_ROI-3cls-in-random_vggp2psa/best_loss.pth'),
        'position1': (position1, './runs/ablation_p2psa_position1/best_loss.pth'),
        'position2': (position2, './runs/ablation_p2psa_position2/best_loss.pth'),
        # The original crossed these two branches ('position3' built
        # position4() from the position4 checkpoint and vice versa);
        # realigned so each name loads its own network and directory.
        'position3': (position3, './runs/ablation_p2psa_position3/best_loss.pth'),
        'position4': (position4, './runs/ablation_p2psa_position4/best_acc_180.pth'),
        'resnet': (resnet18, './runs/ablation_resnet_position0/best_loss.pth'),
        'ml_cam': (ml_cam, './runs/train_ROI-3cls-in-random_vggmlcam/best_acc_195.pth'),
        'self_made_1': (resnet18, './runs/SELF_MADE_1/best_loss.pth'),
        'self_made_2': (resnet18, './runs/SELF_MADE_2/best_loss.pth'),
        'self_made_3': (resnet18, './runs/SELF_MADE_3/best_loss.pth'),
        'self_made_4': (resnet18, './runs/SELF_MADE_4/best_loss.pth'),
        'self_made_5': (resnet18, './runs/SELF_MADE_5/best_loss.pth'),
        'self_made_6': (resnet18, './runs/SELF_MADE_3/best_acc_52.pth'),
    }

    if name in simple:
        ctor, ckpt = simple[name]
        net = ctor()
    elif name == 'swin':
        net = create_swin(num_classes=args.num_classes)
        ckpt = './runs/train_ROI-3cls-in-random_swin/best_loss.pth'
    elif name == 'mmal':
        net = MainNet(proposalN=proposalN, num_classes=num_classes, channels=channels)
        ckpt = './runs/train_ROI-3cls-in-random_mmal_net/best_loss.pth'
    elif name == "trans_fg":
        # Imported lazily: the module is only needed for this family.
        from model.trans_fg import VisionTransformer, CONFIGS
        config = CONFIGS[args.model_type]
        config.split = args.split
        config.slide_step = args.slide_step
        net = VisionTransformer(config, 448, zero_head=True, num_classes=args.num_classes, smoothing_value=args.smoothing_value)
        ckpt = './runs/train_ROI-3cls-in-random_trans_fg/best_loss.pth'
    else:
        raise ValueError('unknown model name: %r' % (name,))

    # map_location is applied uniformly; the original 'fp2a' branch omitted
    # it, which fails on a CPU-only host loading a GPU-saved checkpoint.
    params = torch.load(ckpt, map_location=device)
    return load_model(net, params, device)


if __name__ == '__main__':
    # Model family under test. Also stored as kwargs['method'] below, and
    # read (as a global) by test() to pick the forward-call signature.
    name = 'fp2a'
    args = get_args()
    # Families evaluated on single-channel input use the grayscale
    # transform. NOTE(review): none of these names is produced by
    # model_choose(), so this list currently never matches -- confirm.
    gray_model = ['vgg19', 'san10', 'san15', 'apcnn']
    kwargs = test_kwargs('../runs/final_train_RANet/best_loss.pth')
    kwargs['method'] = name
    kwargs['data_path'] = './datasets/ROI-3cls-in-random/test'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # model_choose/test rely on the `device` (and `args`) globals set above.
    model = model_choose(name)
    if name in gray_model:
        _transforms = kwargs['gray_transforms']
    else:
        _transforms = kwargs['transforms']
    test(model, device, _transforms, **kwargs)
