import torch
from torchvision import transforms, datasets
from model import resnet34, resnet101
import numpy as np
from vgg import vgg16_bn
from ViT import vit_base_patch16_224_in21k
from Hilo_vgg import vgg16_hilo
import os 
import argparse
from Hilo_Resnet import Model as hilo_resnet
from sklearn.metrics import roc_auc_score, confusion_matrix, classification_report
from Traditional_Fourier import hilo as traditional_hilo
from Fourier import hilo


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--modelname', type=str, default='Traditional_Hilo')
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    modelname = args.modelname
    # Registry of available architectures. Some entries are constructor
    # callables, others may already be instantiated modules.
    model = {'Hilo': hilo, 'Resnet34': resnet34, 'Vgg16': vgg16_bn,
             'ViT': vit_base_patch16_224_in21k, 'Hilo_vgg': vgg16_hilo,
             'Hilo_Resnet': hilo_resnet, 'Traditional_Hilo': traditional_hilo}
    print(model.keys())

    # Instantiate if the registry entry is a constructor; calling an
    # already-built nn.Module with no arguments raises TypeError, in which
    # case we use the entry as-is. (Narrowed from a bare `except:`.)
    try:
        net = model[modelname]()
    except TypeError:
        net = model[modelname]

    # map_location lets GPU-trained checkpoints load on CPU-only hosts.
    net.load_state_dict(torch.load('./model/' + modelname + '.pth',
                                   map_location=device))
    net.to(device)

    data_transform = transforms.Compose([transforms.ToTensor()])
    image_path = "./archive/"  # root directory containing the test/ split

    test_dataset = datasets.ImageFolder(root=image_path + "test",
                                        transform=data_transform)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=32, shuffle=False,
                                              num_workers=0)

    net.eval()
    with torch.no_grad():
        """
        calculate loss and OA
        """
        y_true = []    # ground-truth labels
        y_scores = []  # predicted probability of class 1 (assumes binary task)
        y_pred = []    # hard argmax predictions

        for test_images, test_labels in test_loader:
            outputs = net(test_images.to(device))  # eval model only have last output layer

            outputs_logit = torch.softmax(outputs, dim=-1)

            y_pred_batch = torch.max(outputs_logit, dim=1)[1]
            y_pred.extend(list(y_pred_batch.cpu().numpy()))
            # NOTE(review): column 1 as the positive-class score assumes a
            # binary classifier — confirm for multi-class checkpoints.
            y_scores_batch = outputs_logit[:, 1]
            y_scores.extend(list(y_scores_batch.cpu().numpy()))
            y_true.extend(test_labels.cpu().numpy())

        # Compute the confusion matrix (binary: tn, fp, fn, tp)
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()

        # Compute accuracy, AUC, sensitivity and specificity
        auc = roc_auc_score(y_true, y_scores)
        sensitivity = tp / (tp + fn)
        specificity = tn / (tn + fp)
        acc = (tn + tp) / (tn + tp + fp + fn)

        print("ACC: {:.2%} AUC: {:.2%} : sensitivity {:.2%} specificity: {:.2%}".format(acc, auc, sensitivity, specificity))

        print(classification_report(y_true, y_pred))