import torch.utils.data as Data
import torch
import torchvision
from joblib import dump, load
import torch.nn as nn
import os
import PIL
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.manifold import TSNE
import seaborn as sns
import numpy as np
import warnings
import csv
from matplotlib import MatplotlibDeprecationWarning
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score,cohen_kappa_score,roc_auc_score
from sklearn import metrics
# Silence matplotlib deprecation noise from the plotting helpers below.
warnings.filterwarnings('ignore', category=MatplotlibDeprecationWarning)

torch.manual_seed(100) # fix the RNG seed so experiment results are reproducible
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 模型 测试集 测试
# Evaluate a trained classifier on a held-out set.
def model_test(model, test_loader, batch_size):
    """Run `model` over every batch in `test_loader` and collect outputs.

    Args:
        model: trained torch.nn.Module classifier; moved to the available device.
        test_loader: DataLoader yielding (data, label) batches.
        batch_size: unused; kept for backward compatibility with existing callers.

    Returns:
        (true_labels, predicted_labels, predicted_pro): plain Python lists of
        ground-truth labels, argmax predictions, and per-class softmax
        probabilities. Probabilities (not raw logits) are returned so that
        downstream roc_auc_score(multi_class='ovr') receives rows summing to 1.

    Side effects:
        Prints mean test loss and test accuracy (as a percentage).
    """
    # Resolve the device locally (same value as the module-level constant),
    # so the function does not depend on module state.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    true_labels = []       # ground-truth class indices
    predicted_labels = []  # argmax predictions
    predicted_pro = []     # softmax probabilities per sample

    model = model.to(device)
    model.eval()  # switch to eval mode once, not on every batch

    loss_function = nn.CrossEntropyLoss()
    loss_test = 0.
    correct_test = 0
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            pre = model(data)

            loss_test += loss_function(pre, label).item()
            predicted = pre.argmax(dim=1)
            correct_test += torch.sum(predicted.view(-1) == label.view(-1)).item()

            true_labels.extend(label.tolist())
            # Softmax so AUC computation downstream gets valid probabilities.
            predicted_pro.extend(torch.softmax(pre, dim=1).tolist())
            predicted_labels.extend(predicted.tolist())

    # Original printed a raw fraction followed by '%'; scale to a true percentage.
    print(f'test_loss:{loss_test/len(test_loader)}, '
          f'test_Acc:{100.0 * correct_test/len(test_loader.dataset)}%')
    return true_labels, predicted_labels, predicted_pro
class Evaluation():
    """Compute classification metrics from collected model outputs."""

    def __init__(self, true_labels, predicted_labels, predicted_pro):
        # Ground-truth class indices.
        self.true_labels = np.array(true_labels)
        # Argmax predictions, aligned with true_labels.
        self.predicted_labels = np.array(predicted_labels)
        # Per-class scores, shape (n_samples, n_classes); must be probabilities
        # summing to 1 per row for AUC_score to be valid.
        self.predicted_pro = np.array(predicted_pro)

    def accuracy_score(self):
        """Return overall classification accuracy in [0, 1]."""
        return accuracy_score(y_true=self.true_labels, y_pred=self.predicted_labels)

    def f1_score(self):
        """Return the weighted F1 score.

        Bug fix: the original implementation called metrics.recall_score here,
        so the value exported as 'F1_Score' was actually weighted recall.
        """
        return metrics.f1_score(y_true=self.true_labels,
                                y_pred=self.predicted_labels,
                                average='weighted')

    def kappa_score(self):
        """Return Cohen's kappa (chance-corrected agreement)."""
        return cohen_kappa_score(self.true_labels, self.predicted_labels)

    def AUC_score(self):
        """Return one-vs-rest multiclass ROC AUC.

        Requires predicted_pro rows to be probability distributions;
        roc_auc_score raises otherwise.
        """
        return roc_auc_score(self.true_labels, self.predicted_pro, multi_class='ovr')


if __name__ == '__main__':
    # Per-subject locations of trained model checkpoints and CWT data loaders.
    param_toolboxPath = r'D:\Project_mb\modelParameters\BCI42a\StarNet'
    result_toolboxPath = r'D:\Project_mb\CWTResult\BCI42a\StarNet'
    subjects = [f'sub0{i + 1}' for i in range(9)]
    Evaluation_leader = ['acc', 'kappa', 'F1_Score']
    # Maps 'leader' to the metric header row, then each subject to its metrics.
    eval_results = {'leader': Evaluation_leader}

    for subject in subjects:
        data_dir = os.path.join(result_toolboxPath, subject)
        model_dir = os.path.join(param_toolboxPath, subject)
        # NOTE(review): torch.load of a fully pickled model executes arbitrary
        # code from the checkpoint — only load checkpoints you trust.
        model = torch.load(os.path.join(model_dir, 'CWT_best_model.pt'),
                           map_location=torch.device('cpu'))

        # Load the held-out validation DataLoader (joblib pickle).
        test_loader = load(os.path.join(data_dir, 'val_loader.pkl'))
        batch_size = 32
        # Run the model over the validation set and collect outputs.
        true_labels, predicted_labels, predicted_pro = model_test(model, test_loader, batch_size)

        # 'evaluator', not 'eval': avoid shadowing the builtin.
        evaluator = Evaluation(true_labels, predicted_labels, predicted_pro)
        # Order must match Evaluation_leader: acc, kappa, F1_Score.
        eval_results[subject] = [evaluator.accuracy_score(),
                                 evaluator.kappa_score(),
                                 evaluator.f1_score()]

    # One CSV row per dict entry: key followed by its metric list.
    # The with-statement closes the file; no explicit close() needed.
    with open(os.path.join(result_toolboxPath, 'evaluations.csv'), 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(eval_results.items())