import itertools
import os
import numpy as np
import pandas as pd
import random
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torchsummary import summary
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import math
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, roc_curve, auc, \
    average_precision_score, precision_score, recall_score, f1_score, accuracy_score
from scipy import signal  # added to the import section at the top of the file



class GroupBatchnorm2d(nn.Module):
    """Group normalisation with learnable per-channel affine parameters.

    Channels are partitioned into `group_num` groups; each group is
    normalised by its own mean and (unbiased) standard deviation, and a
    per-channel scale/shift is then applied, broadcast over H and W.
    """

    def __init__(self, c_num: int,
                 group_num: int = 16,
                 eps: float = 1e-10
                 ):
        super().__init__()
        # Every group must contain at least one channel.
        assert c_num >= group_num
        self.group_num = group_num
        # Per-channel affine parameters (shape (C, 1, 1) for broadcasting).
        self.weight = nn.Parameter(torch.randn(c_num, 1, 1))
        self.bias = nn.Parameter(torch.zeros(c_num, 1, 1))
        self.eps = eps

    def forward(self, x):
        batch, channels, height, width = x.size()
        # Flatten each group's channels and spatial positions together.
        grouped = x.view(batch, self.group_num, -1)
        mean = grouped.mean(dim=2, keepdim=True)
        std = grouped.std(dim=2, keepdim=True)
        normed = (grouped - mean) / (std + self.eps)
        normed = normed.view(batch, channels, height, width)
        return normed * self.weight + self.bias


class SRU(nn.Module):
    """Spatial Reconstruction Unit (SCConv).

    Gates the input by how informative each normalised response is, then
    cross-mixes the "informative" and "residual" halves channel-wise.
    """

    def __init__(self,
                 oup_channels: int,
                 group_num: int = 16,
                 gate_treshold: float = 0.5,
                 torch_gn: bool = True
                 ):
        super().__init__()
        # Either torch's GroupNorm or the hand-rolled variant above.
        if torch_gn:
            self.gn = nn.GroupNorm(num_channels=oup_channels, num_groups=group_num)
        else:
            self.gn = GroupBatchnorm2d(c_num=oup_channels, group_num=group_num)
        self.gate_treshold = gate_treshold
        self.sigomid = nn.Sigmoid()  # (name kept as in original codebase)

    def forward(self, x):
        gn_x = self.gn(x)
        # Per-channel importance weights, normalised to sum to one.
        w_gamma = (self.gn.weight / sum(self.gn.weight)).view(1, -1, 1, 1)
        reweigts = self.sigomid(gn_x * w_gamma)
        # Gate: values above the threshold saturate to 1, others keep
        # their soft weight.
        informative = torch.where(reweigts > self.gate_treshold,
                                  torch.ones_like(reweigts), reweigts)
        # Complementary gate: values above the threshold are zeroed.
        residual = torch.where(reweigts > self.gate_treshold,
                               torch.zeros_like(reweigts), reweigts)
        return self.reconstruct(informative * x, residual * x)

    def reconstruct(self, x_1, x_2):
        # Split each stream in half along channels and cross-add the halves.
        x_11, x_12 = torch.split(x_1, x_1.size(1) // 2, dim=1)
        x_21, x_22 = torch.split(x_2, x_2.size(1) // 2, dim=1)
        return torch.cat([x_11 + x_22, x_12 + x_21], dim=1)


class CRU(nn.Module):
    '''
    Channel Reconstruction Unit (SCConv): splits the channels into an "up"
    and a "low" branch, transforms each cheaply, and fuses them with a
    channel-attention softmax.

    alpha: 0<alpha<1, fraction of channels routed to the "up" branch.
    squeeze_radio: channel-reduction ratio applied before the transforms.
    group_size: group count of the grouped (GWC) convolution.
    group_kernel_size: kernel size of the grouped convolution.
    '''

    def __init__(self,
                 op_channel: int,
                 alpha: float = 1 / 2,
                 squeeze_radio: int = 2,
                 group_size: int = 2,
                 group_kernel_size: int = 3,
                 ):
        super().__init__()
        self.up_channel = up_channel = int(alpha * op_channel)
        self.low_channel = low_channel = op_channel - up_channel
        # 1x1 "squeeze" convs reduce each branch's channels by squeeze_radio.
        self.squeeze1 = nn.Conv2d(up_channel, up_channel // squeeze_radio, kernel_size=1, bias=False)
        self.squeeze2 = nn.Conv2d(low_channel, low_channel // squeeze_radio, kernel_size=1, bias=False)
        # up branch: grouped conv + pointwise conv, each mapping back to
        # op_channel outputs (summed in forward).
        self.GWC = nn.Conv2d(up_channel // squeeze_radio, op_channel, kernel_size=group_kernel_size, stride=1,
                             padding=group_kernel_size // 2, groups=group_size)
        self.PWC1 = nn.Conv2d(up_channel // squeeze_radio, op_channel, kernel_size=1, bias=False)
        # low branch: pointwise conv whose output, concatenated with the
        # squeezed input itself, again totals op_channel channels.
        self.PWC2 = nn.Conv2d(low_channel // squeeze_radio, op_channel - low_channel // squeeze_radio, kernel_size=1,
                              bias=False)
        self.advavg = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        # Split the input channels between the two branches.
        up, low = torch.split(x, [self.up_channel, self.low_channel], dim=1)
        up, low = self.squeeze1(up), self.squeeze2(low)
        # Transform: Y1 and Y2 each carry op_channel channels.
        Y1 = self.GWC(up) + self.PWC1(up)
        Y2 = torch.cat([self.PWC2(low), low], dim=1)
        # Fuse: softmax over the pooled 2*op_channel channels acts as
        # channel attention; the two halves are then summed back together.
        out = torch.cat([Y1, Y2], dim=1)
        out = F.softmax(self.advavg(out), dim=1) * out
        out1, out2 = torch.split(out, out.size(1) // 2, dim=1)
        return out1 + out2


import numpy as np
import torch
from torch import nn
from torch.nn import init
from collections import OrderedDict


class EEGNetModel(nn.Module):  # EEGNET-8,2
    """EEGNet-style CNN for EEG trial classification.

    Input: (batch, 1, chans, time_points). Output: (batch, classes) logits.

    Parameters
    ----------
    chans : number of EEG electrodes (input height).
    classes : number of output classes.
    time_points : samples per trial (input width).
    temp_kernel : length of the temporal filters in block1.
    f1, f2 : number of temporal / separable filters.
    d : depth multiplier of the depthwise (spatial) convolution.
    pk1, pk2 : average-pooling widths after block2 / block3.
    dropout_rate : dropout probability after each pooling stage.
    max_norm1, max_norm2 : max-norm bounds applied (once, at construction)
        to the depthwise conv and the classifier weights.
    sep_kernel : temporal length of the separable conv's depthwise kernel
        (new, defaults to the previously hard-coded 16).
    """

    def __init__(self, chans=22, classes=4, time_points=1001, temp_kernel=32,
                 f1=16, f2=32, d=2, pk1=8, pk2=16, dropout_rate=0.5,
                 max_norm1=1, max_norm2=0.25, sep_kernel=16):
        super(EEGNetModel, self).__init__()
        # FC input size: both poolings floor-divide the time axis, and
        # floor(floor(t/pk1)/pk2) == floor(t/(pk1*pk2)) for positive ints.
        linear_size = (time_points // (pk1 * pk2)) * f2

        # Temporal filters ('same' padding keeps the time axis length).
        self.block1 = nn.Sequential(
            nn.Conv2d(1, f1, (1, temp_kernel), padding='same', bias=False),
            nn.BatchNorm2d(f1),
        )
        # Spatial filters: depthwise conv spanning all electrodes.
        self.block2 = nn.Sequential(
            nn.Conv2d(f1, d * f1, (chans, 1), groups=f1, bias=False),  # Depthwise Conv
            nn.BatchNorm2d(d * f1),
            nn.ELU(),
            nn.AvgPool2d((1, pk1)),
            nn.Dropout(dropout_rate)
        )
        # Separable conv = depthwise temporal conv + pointwise (1x1) conv.
        self.block3 = nn.Sequential(
            # Depthwise: groups must equal the input channel count (d*f1).
            # The original used groups=f2, which is only a depthwise conv
            # when f2 == d*f1 (true for the defaults) and invalid when
            # d*f1 is not divisible by f2.
            nn.Conv2d(d * f1, d * f1, (1, sep_kernel), groups=d * f1, bias=False, padding='same'),
            nn.Conv2d(d * f1, f2, kernel_size=1, bias=False),  # Pointwise Conv
            nn.BatchNorm2d(f2),
            nn.ELU(),
            nn.AvgPool2d((1, pk2)),
            nn.Dropout(dropout_rate)
        )
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(linear_size, classes)

        # Apply max_norm constraint to the depthwise layer in block2.
        self._apply_max_norm(self.block2[0], max_norm1)

        # Apply max_norm constraint to the linear classifier.
        self._apply_max_norm(self.fc, max_norm2)

    def _apply_max_norm(self, layer, max_norm):
        # Renorm every weight tensor of `layer` to an L2 ball of radius
        # max_norm along dim 0.
        # NOTE(review): this runs once at construction only; the EEGNet
        # paper applies max-norm as a constraint after every update.
        # Kept as-is to preserve training behaviour.
        for name, param in layer.named_parameters():
            if 'weight' in name:
                param.data = torch.renorm(param.data, p=2, dim=0, maxnorm=max_norm)

    def forward(self, x):
        """Return class logits for a batch shaped (N, 1, chans, time_points)."""
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x


class TrainModel():
    """Runs the optimisation loop, records learning curves, and optionally
    saves the trained model and a training-process figure."""

    def __init__(self,):
        # Train on GPU when available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def train_model(self, model, train_dataset, test_dataset, learning_rate=0.001, batch_size=64, epochs=500, save_root=None):
        """Train `model` with Adam + cross-entropy and return it.

        Datasets must yield (input, label, subject_id) triples; the subject
        id is ignored here.  `save_root` is the directory for the figure
        and checkpoint; when None (the default) nothing is written to disk
        (the original code crashed on os.path.join(None, ...)).
        """
        model = model.to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        # Fix all RNGs for reproducibility (DataLoader shuffling draws from
        # the global torch RNG at iteration time).
        random.seed(56)
        np.random.seed(56)
        torch.manual_seed(56)
        torch.cuda.manual_seed(56)

        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

        # Per-epoch history for the plots below.
        train_accuracies = []
        test_accuracies = []
        train_losses = []

        for epoch in range(epochs):
            # --- training phase ---
            model.train()
            running_loss = 0.0
            correct = 0
            total = 0
            for inputs, labels, _pids in train_loader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # loss.item() is the batch mean; weight by batch size so
                # epoch_loss is a true per-sample average.
                running_loss += loss.item() * inputs.size(0)
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

            epoch_loss = running_loss / len(train_loader.dataset)
            train_accuracy = correct / total * 100

            # --- evaluation phase ---
            model.eval()
            test_correct = 0
            test_total = 0
            with torch.no_grad():
                for inputs, labels, _pids in test_loader:
                    inputs = inputs.to(self.device)
                    labels = labels.to(self.device)
                    outputs = model(inputs)
                    _, predicted = torch.max(outputs, 1)
                    test_total += labels.size(0)
                    test_correct += (predicted == labels).sum().item()

            test_accuracy = test_correct / test_total * 100

            # Record history.
            train_accuracies.append(train_accuracy)
            test_accuracies.append(test_accuracy)
            train_losses.append(epoch_loss)

            print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss:.4f}, Train Acc: {train_accuracy:.2f}%, Test Acc: {test_accuracy:.2f}%")

        # --- learning curves ---
        plt.figure(figsize=(12, 5))

        # Accuracy curves.
        plt.subplot(1, 2, 1)
        plt.plot(train_accuracies, label='Train Accuracy')
        plt.plot(test_accuracies, label='Test Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy (%)')
        plt.title('Training and Testing Accuracy')
        plt.legend()

        # Loss curve.
        plt.subplot(1, 2, 2)
        plt.plot(train_losses, label='Train Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Training Loss')
        plt.legend()

        plt.tight_layout()
        if save_root is not None:
            plt.savefig(os.path.join(save_root, 'training_process.png'))
        plt.show()
        plt.close()

        # Save the trained weights.
        if save_root is not None:
            torch.save(model.state_dict(), os.path.join(save_root, 'model.pth'))
        return model


class EvalModel():
    """Evaluates a trained classifier: prints per-subject and overall
    metrics and writes confusion-matrix / PR / ROC figures to disk."""

    def __init__(self, model):
        # Evaluate on GPU when available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)

    def cal_metrics_and_plot_fig(self, test_dataset, classes, save_root=None):
        """Evaluate on `test_dataset` and report metrics per subject id and
        overall.

        `test_dataset` yields (input, label, subject_id) triples; `classes`
        is the ordered list of class names; figures are saved under
        `save_root`.
        """
        self.model.eval()
        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

        correct = 0
        total = 0
        labelss = []      # true labels
        predicteds = []   # argmax predictions
        pidss = []        # subject ids
        pre_p = []        # softmax class probabilities per sample

        with torch.no_grad():
            for inputs, labels, pids in test_loader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                labelss.append(labels.item())
                predicteds.append(predicted.item())
                pidss.append(pids.item())
                pre_p.append(torch.softmax(outputs, dim=1).cpu().numpy()[0].tolist())

        n_classes = len(classes)
        # Clamp labels defensively so confusion_matrix / one-hot indexing
        # cannot go out of range.
        labelss = np.clip(np.array(labelss), 0, n_classes - 1)
        predicteds = np.clip(np.array(predicteds), 0, n_classes - 1)
        pre_p = np.array(pre_p)
        pidss = np.array(pidss)

        # Per-subject reports.
        for pid in sorted(set(pidss.tolist())):
            print(f"{pid}: -----------------------------------------------")
            index = np.where(pidss == pid)
            l = labelss[index]
            p = predicteds[index]
            pp = pre_p[index]
            acc = np.sum(l == p) / len(l) * 100
            print(f"{pid}: acc : {acc:.2f}%")
            self._report_subset(l, p, pp, classes, acc, save_root, prefix='%d_' % pid)

        # Overall report over every sample.
        print('avg: -----------------------------------')
        accuracy = (correct / total) * 100
        self._report_subset(labelss, predicteds, pre_p, classes, accuracy, save_root, prefix='')

    def _report_subset(self, y_true, y_pred, probs, classes, acc, save_root, prefix):
        """Figures + scalar metrics for one subset of samples."""
        n_classes = len(classes)
        self._plot_confusion(y_true, y_pred, classes, acc, save_root, prefix)

        # One-hot encode the true labels for curve-based metrics.
        onehot = np.eye(n_classes)[y_true]
        self._plot_pr(onehot, probs, save_root, prefix)
        auc_v = self._plot_roc(onehot, probs, save_root, prefix)
        # AUC is the area under the (micro-averaged) ROC curve.
        print('auc:', auc_v)

        # sklearn's metric signature is (y_true, y_pred); the original code
        # passed the arguments the other way round, which swapped the
        # reported precision and recall.
        p = precision_score(y_true, y_pred, average='weighted')
        print('precision_score', p)
        r = recall_score(y_true, y_pred, average='weighted')
        print('recall_score', r)
        f1 = f1_score(y_true, y_pred, average='weighted')
        print('f1_score', f1)
        accuracy = accuracy_score(y_true, y_pred)
        print('accuracy', accuracy)

    def _plot_confusion(self, y_true, y_pred, classes, acc, save_root, prefix):
        """Row-normalised confusion-matrix heatmap."""
        n_classes = len(classes)
        cf_matrix = confusion_matrix(y_true, y_pred, labels=range(n_classes))
        # Normalise each row; the epsilon avoids division by zero for
        # classes absent from y_true.
        cf_matrix = cf_matrix.astype('float') / (cf_matrix.sum(axis=1)[:, np.newaxis] + 1e-6)
        df_cm = pd.DataFrame(cf_matrix, index=classes, columns=classes)

        plt.figure(figsize=(10, 7))
        sn.heatmap(df_cm, annot=True, cmap='Blues', fmt='.2f')
        plt.xlabel('Predicted labels')
        plt.ylabel('True labels')
        plt.title('Confusion Matrix acc=%.2f' % (acc))
        plt.savefig(os.path.join(save_root, prefix + 'confusion_matrix_model.png'))
        plt.show()
        plt.close()

    def _plot_pr(self, onehot, probs, save_root, prefix):
        """Macro-averaged precision-recall curve."""
        precision, recall, _ = precision_recall_curve(onehot.ravel(), probs.ravel())
        ap = average_precision_score(onehot, probs, average="macro")
        print('Average precision score, macro-averaged over all classes: {0:0.2f}'.format(ap))
        plt.figure()
        plt.step(recall, precision, where='post')
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.ylim([0.0, 1.05])
        plt.xlim([0.0, 1.0])
        plt.title('Average precision score, macro-averaged over all classes: AP={0:0.3f}'.format(ap))
        plt.savefig(os.path.join(save_root, prefix + 'Precision-Recall.png'))
        plt.show()
        plt.close()

    def _plot_roc(self, onehot, probs, save_root, prefix):
        """Micro-averaged ROC curve; returns its AUC.

        The original stored the micro-average in fpr[2]/tpr[2], silently
        overwriting class 2's per-class curve, and called savefig() after
        show(), which can save an empty figure on interactive backends.
        Both are fixed here.
        """
        fpr, tpr, _ = roc_curve(onehot.ravel(), probs.ravel())
        roc_auc = auc(fpr, tpr)
        plt.figure()
        lw = 2
        plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig(os.path.join(save_root, prefix + 'roc.png'))
        plt.show()
        plt.close()
        return roc_auc

class Config:
    """Hyper-parameters consumed by the __main__ training script."""

    # Adam learning rate.
    lr = 0.001
    # Samples per mini-batch.
    batch_size = 8
    # Number of training epochs.
    epochs = 600

def butterfiter(data, fs=250, band=(3, 30), order=4):
    """Zero-phase Butterworth band-pass filter along the last axis.

    Parameters
    ----------
    data : ndarray, shape (trials, channels, samples)
        Signals to filter; filtering is applied along the last axis, so any
        array with time as its final dimension works.
    fs : float
        Sampling rate in Hz (default 250, the rate of these recordings).
    band : tuple of (low, high)
        Pass-band cut-off frequencies in Hz (default 3-30 Hz).
    order : int
        Butterworth filter order (default 4).

    Returns
    -------
    ndarray with the same shape and dtype as `data`.
    """
    b, a = signal.butter(order, list(band), 'bandpass', fs=fs)
    # filtfilt broadcasts over the leading axes, so one vectorised call
    # replaces the original per-trial/per-channel Python loop.
    filtered_data = signal.filtfilt(b, a, data, axis=-1)
    # The original wrote results into zeros_like(data); cast back to the
    # input dtype to keep that behaviour.
    return filtered_data.astype(data.dtype, copy=False)

if __name__ == '__main__':
    # Data and result paths (set for the remote training environment).
    data_root = '/root/autodl-tmp/datas'
    save_root = '/root/autodl-tmp/eegnet_results'
    model_path = '/root/autodl-tmp/model.pth'
    os.makedirs(save_root, exist_ok=True)
    
    # Hyper-parameters taken from Config.
    EPOCHS = Config.epochs
    BATCH_SIZE = Config.batch_size
    LEARNING_RATE = Config.lr
    input_size = (1, 22, 1001)
    # 'train' runs training; any other value loads model_path and evaluates.
    stage = 'train'

    # Load the arrays: X trials, y labels, pid subject ids.
    X = np.load(os.path.join(data_root, 'x.npy'))
    y = np.load(os.path.join(data_root, 'y.npy'))
    pid = np.load(os.path.join(data_root, 'pid.npy'))

    # Keep only subject 3's trials (within-subject experiment).
    subject_mask = pid == 3
    X = X[subject_mask]
    y = y[subject_mask]
    pid = pid[subject_mask]

    # Shape before filtering.
    print("Data shape before filtering:", X.shape)
    
    # Apply the 3-30 Hz band-pass filter.
    X = butterfiter(X)
    
    # Shape after filtering (unchanged by the filter).
    print("Data shape after filtering:", X.shape)

    # Make sure label values lie in 0-3.
    y = y - 1  # original labels are 1-4; shift them to 0-3
    y = np.clip(y, 0, 3)  # clamp any stray values into 0-3

    # Sanity prints on the selected data.
    print("Selected subject data shape:", X.shape)
    print("Label range:", np.min(y), "to", np.max(y))
    print("Unique labels:", np.unique(y))

    # Seed all RNGs so the split (and subsequent training) is reproducible.
    random.seed(666)
    np.random.seed(666)
    torch.manual_seed(666)
    torch.cuda.manual_seed(666)

    # Stratified train/test split, carrying the subject ids along.
    X_train, X_test, y_train, y_test, pid_train, pid_test = train_test_split(
        X, y, pid, test_size=0.2, random_state=666, stratify=y)

    # To tensors; unsqueeze(1) adds the singleton "image channel" dimension.
    X_train = torch.Tensor(X_train).unsqueeze(1)
    X_test = torch.Tensor(X_test).unsqueeze(1)
    y_train = torch.LongTensor(y_train)
    y_test = torch.LongTensor(y_test)
    pid_train = torch.LongTensor(pid_train)
    pid_test = torch.LongTensor(pid_test)

    # Re-check the label ranges after conversion.
    print("Train labels range:", torch.min(y_train).item(), "to", torch.max(y_train).item())
    print("Test labels range:", torch.min(y_test).item(), "to", torch.max(y_test).item())

    # Dataset sizes.
    print("Size of X_train:", X_train.size())
    print("Size of X_test:", X_test.size())
    print("Size of y_train:", y_train.size())
    print("Size of y_test:", y_test.size())

    # Datasets yield (input, label, subject_id) triples.
    train_dataset = TensorDataset(X_train, y_train, pid_train)
    test_dataset = TensorDataset(X_test, y_test, pid_test)

    # Build the model and print its layer summary.
    eegnet_model = EEGNetModel()
    summary(eegnet_model, input_size, device='cpu')

    if stage=='train':
        trainer = TrainModel()
        trained_eegnet_model = trainer.train_model(
            eegnet_model, train_dataset, test_dataset, learning_rate=LEARNING_RATE,
            batch_size=BATCH_SIZE, epochs=EPOCHS, save_root=save_root)
        # NOTE(review): train_model already saves the same state_dict to
        # save_root/model.pth, so this second save is redundant.
        torch.save(trained_eegnet_model.state_dict(), os.path.join(save_root,'model.pth'))
    else:
        trained_eegnet_model = eegnet_model
        trained_eegnet_model.load_state_dict(torch.load(model_path))

        classes_list = ['Left', 'Right', 'Foot', 'Tongue']
        eval_model = EvalModel(trained_eegnet_model)
        eval_model.cal_metrics_and_plot_fig(test_dataset, classes_list, save_root=save_root)

# Test Accuracy: 69.43%
