from torchvision import transforms
import os
os.getcwd()
from torch.utils.data import DataLoader
from model.vgg16_cbam_se import vgg_se, vgg_cbam
from metric import *
from sklearn import metrics
from torchvision import datasets, transforms
from sklearn.metrics import confusion_matrix
from args import get_args, test_kwargs
# from train_ssl_multiHead_RelationNet17 import accuracy
from torch.autograd import Variable
import torch.nn as nn
from model.RelationNet_multihead import Net

import os
os.getcwd()
import time
from torchvision.models import resnet18
# from roc_models.resnet import ResNet18 as resnet18
from model.vgg import vgg16_bn
from roc_models.vgg19 import vgg19_bn as vgg19
from model.san import san
from roc_models.bilinear_cnn_all import bcnn
from roc_models.apcnn.model.vgg19 import vgg19 as apcnn
import torch
import numpy as np
import matplotlib.pyplot as plt


def val(model, val_loader, device=None):
    """Run one no-grad evaluation pass over *val_loader* and return the mean loss.

    Args:
        model: network already moved to the target device; switched to eval mode.
        val_loader: iterable of batches, either ``(images, labels)`` or
            ``(images, labels, path)`` — only the first two items are used.
            (The caller in this file feeds an ``ImageFolder`` loader, which
            yields 2-tuples; the original 3-way unpack crashed on those.)
        device: torch device for the batches. When ``None``, falls back to the
            module-level ``device`` global, matching the original behavior.

    Returns:
        Mean cross-entropy loss over all batches, or ``None`` if the loader
        is empty (avoids the original ZeroDivisionError).
    """
    if device is None:
        # Original code read the script-level global defined under __main__.
        device = globals().get('device')
    model.eval()
    # Hoisted out of the loop: the loss function is loop-invariant.
    criterion = nn.CrossEntropyLoss().to(device)
    running_loss = 0.0
    count = 0
    with torch.no_grad():
        for batch in val_loader:
            # Tolerate both (images, labels) and (images, labels, path) batches.
            images = batch[0].to(device)
            labels = batch[1].to(device)
            output = model(images)
            running_loss += criterion(output, labels).item()
            count += 1
    if count == 0:
        return None
    return running_loss / count


def test(model, device, _transforms, **kwargs):
    """Evaluate *model* on an ImageFolder test set and print accuracy metrics.

    Args:
        model: trained network in eval mode on *device*.
        device: torch device to run inference on.
        _transforms: torchvision transform pipeline applied to each image.
        **kwargs: must contain 'data_path' (ImageFolder root), 'batch_size',
            'num_class', and 'method' (title for the confusion-matrix plot).

    Side effects: prints overall/per-class accuracy, a classification report,
    and delegates to make_confusion_matrix (which saves/shows a plot).
    """
    data_path = kwargs['data_path']
    bz = kwargs['batch_size']
    num_class = kwargs['num_class']
    labelnumber = list(range(num_class))
    test_valid = datasets.ImageFolder(root=data_path,
                                      transform=_transforms)
    test_loader = torch.utils.data.DataLoader(test_valid, batch_size=bz,
                                              shuffle=False,
                                              num_workers=0)
    val(model, test_loader)
    time_all_start = time.time()
    correct, total = 0, 0
    class_correct = [0.0] * num_class
    # BUG FIX: was hard-coded to 5 classes; the original bare `except` only
    # existed to swallow the IndexError that caused for num_class > 5.
    class_total = [0.0] * num_class
    allpre = []
    alllabel = []
    with torch.no_grad():
        for data in test_loader:
            inputs, labels = data[0].to(device), data[1].to(device)
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            allpre.extend(predicted.detach().cpu().numpy().tolist())
            alllabel.extend(labels.detach().cpu().numpy().tolist())
            total += labels.size(0)
            # No .squeeze(): on a batch of size 1 it produced a 0-dim tensor
            # that broke per-element indexing below.
            hits = (predicted == labels)
            correct += hits.sum().item()
            for j in range(labels.size(0)):
                lab = labels[j].item()
                class_correct[lab] += hits[j].item()
                class_total[lab] += 1
    time_all_end = time.time()
    # BUG FIX: elapsed time was printed as start - end (always negative).
    print("all_image_times: ", time_all_end - time_all_start)
    if total == 0:
        print('No test images found at %s' % data_path)
        return
    print('Accuracy of the network on the images: %.2f %%' % (
            100 * correct / total))
    for i in range(len(labelnumber)):
        print('Accuracy of %5s : %.2f %%' % (
            labelnumber[i], 100 * class_correct[i] / (class_total[i] + 1e-9)))

    # Generated from num_class instead of a hard-coded 5-entry list.
    target = ['class %d' % c for c in labelnumber]
    print(metrics.classification_report(alllabel, allpre, labels=labelnumber, digits=4, target_names=target))
    make_confusion_matrix(alllabel, allpre, labelnumber, method=kwargs['method'])


def make_confusion_matrix(y_true, y_pred, labels, normalize=False, vis=True, method='confusion matrix'):
    """Compute, print, and optionally plot a confusion matrix.

    Args:
        y_true: ground-truth label list.
        y_pred: predicted label list.
        labels: ordered class labels defining the matrix axes.
        normalize: if True, convert each row to per-class proportions.
        vis: if True, annotate, save ('confusion_matrix.png'), and show a plot.
        method: plot title (here: the model name under evaluation).
    """
    tick_marks = np.arange(len(labels)) + 0.5
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    if normalize:
        np.set_printoptions(precision=2)
        # BUG FIX: a class absent from y_true has a zero row sum, which made
        # the whole row NaN; clamp the divisor so such rows normalize to 0.
        row_sums = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / np.maximum(row_sums, 1)
    print(cm)
    if vis:
        plt.figure(figsize=(12, 8), dpi=120)
        ind_array = np.arange(len(labels))
        x, y = np.meshgrid(ind_array, ind_array)
        # Annotate each non-trivial cell; white text on dark cells for contrast.
        for x_val, y_val in zip(x.flatten(), y.flatten()):
            c = cm[y_val][x_val]
            if c > 0.01:
                plt.text(x_val, y_val, "%0.2f" % (c,), color='white' if c > cm.max()/2 else 'black', fontsize=10, va='center', ha='center')
        # Minor ticks at cell edges give the grid lines between cells.
        plt.gca().set_xticks(tick_marks, minor=True)
        plt.gca().set_yticks(tick_marks, minor=True)
        plt.gca().xaxis.set_ticks_position('none')
        plt.gca().yaxis.set_ticks_position('none')
        plt.grid(True, which='minor', linestyle='-')
        plt.gcf().subplots_adjust(bottom=0.15)
        plot_confusion_matrix(cm, labels, title=method)
        plt.savefig('confusion_matrix.png', format='png')
        plt.show()


def plot_confusion_matrix(cm, labels, title='Confusion Matrix', cmap=plt.cm.Blues):
    """Render *cm* as a heatmap on the current figure with labeled axes.

    Args:
        cm: 2-D confusion-matrix array (counts or normalized proportions).
        labels: tick labels for both axes, one per class.
        title: figure title.
        cmap: matplotlib colormap for the heatmap.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(labels))
    plt.xticks(ticks, labels, rotation=90)
    plt.yticks(ticks, labels)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


def load_model(model, state_dict, device):
    """Restore *state_dict* into *model*, move it to *device*, set eval mode.

    Returns the same model instance, ready for inference.
    """
    model.load_state_dict(state_dict)
    # Module.to() and .eval() both operate in place and return self.
    return model.to(device).eval()


def model_choose(name):
    """Build the network for *name* and load its hard-coded checkpoint.

    Args:
        name: one of the model identifiers handled below (e.g. 'resnet18',
            'vgg16', 'RANet', ...). Checkpoint paths are fixed to this
            machine's layout — TODO(review): consider moving them to args.

    Returns:
        The model on the module-level ``device``, in eval mode.

    Raises:
        ValueError: if *name* matches no branch (the original fell through
        and died with an opaque UnboundLocalError at ``return model``).
    """
    if name == 'resnet18':
        net1 = resnet18()
        params = torch.load('/home/zhb/Desktop/experiment/runs/resnet18/best_loss.pth')
        model = load_model(net1, params, device)
    elif name == 'vgg16':
        net3 = vgg16_bn()
        params = torch.load('../runs/vgg16/best_acc_157.pth')
        model = load_model(net3, params, device)
    elif name == 'vgg19':
        net3 = vgg19()
        params = torch.load('./roc_pth/vgg19/best_loss.pth')
        model = load_model(net3, params, device)
    elif name == 'san10':
        net4 = san(sa_type=1, layers=(2, 1, 2, 4, 1), kernels=[3, 7, 7, 7, 7], num_classes=5).cuda().eval()
        params = torch.load('./roc_pth/san10-patchwise/best_acc_190.pth')
        model = load_model(net4, params, device)
    elif name == 'san15':
        net5 = san(sa_type=1, layers=(3, 2, 3, 5, 2), kernels=[3, 7, 7, 7, 7], num_classes=5).cuda().eval()
        params = torch.load('./roc_pth/san15-patchwise/best_loss.pth')
        model = load_model(net5, params, device)
    elif name == 'vgg16_se':
        net3 = vgg_se()
        params = torch.load('/home/zhb/Desktop/experiment/runs/vgg_se/best_acc_188.pth')
        model = load_model(net3, params, device)
    elif name == 'vgg16_cbam':
        net3 = vgg_cbam()
        params = torch.load('/home/zhb/Desktop/experiment/runs/vgg_cbam/best_acc_183.pth')
        model = load_model(net3, params, device)
    elif name == 'RANet':
        net11 = Net(5)
        params = torch.load('/home/zhb/Desktop/experiment/dgx_zhb/runs/relationNet_multihead/best_acc_188.pth')
        model = load_model(net11, params, device)
    elif name == 'LA-RANet':
        net12 = Net(5)
        params = torch.load('/home/zhb/Desktop/experiment/dgx_zhb/runs/SSL_relationNet_multihead_19/best_acc_188.pth')
        model = load_model(net12, params, device)
    elif name == 'RANet_val':
        net = Net(5)
        params = torch.load('/home/zhb/Desktop/experiment/dgx_zhb/runs/final_train_RANet_resume4_val_best/best_acc_141.pth')
        model = load_model(net, params, device)
    elif name == 'RANet_test':
        net = Net(5)
        params = torch.load('/home/zhb/Desktop/experiment/dgx_zhb/runs/super_RANet/super_RANet.pth', map_location=device)
        model = load_model(net, params, device)
    elif name == 'LA_RANet_new':
        net = Net(5)
        params = torch.load('/home/zhb/Desktop/experiment/dgx_zhb/runs/super_LA_RANet/super_LA_RANet.pth', map_location=device)
        model = load_model(net, params, device)
    else:
        raise ValueError('unknown model name: %r' % name)

    return model


if __name__ == '__main__':
    # Model under evaluation; also used as the confusion-matrix title.
    name = 'resnet18'

    # These models were trained on single-channel input and therefore need
    # the grayscale transform pipeline.
    gray_model = ['vgg19', 'san10', 'san15', 'apcnn']

    kwargs = test_kwargs('../runs/final_train_RANet/best_loss.pth')
    kwargs['method'] = name
    kwargs['data_path'] = '../EUS_bulk/test'

    # NOTE: model_choose() reads this module-level `device`, so it must be
    # bound before the call below.
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    model = model_choose(name)
    _transforms = kwargs['gray_transforms'] if name in gray_model else kwargs['transforms']
    test(model, device, _transforms, **kwargs)
