import pathlib
import pandas as pd
import numpy as np
import torch
import torchvision
from joblib import dump, load
import torch.nn as nn
import time
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision import datasets
import os
import PIL
import torch.utils.data as Data
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from matplotlib import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
import sys
sys.path.append("/home/x/Documents/CalculusBrain")
from Codes.mydataset import CustomDataset
# Import model definitions
from Codes.model_v2 import MobileNetV2
from Codes.Mymodel import SwinCNNGAMModel
def create_dir_if_not_exists(directory):
    """Create *directory* (including any missing parents) if it does not exist.

    Uses os.makedirs with exist_ok=True: the original os.mkdir raised
    FileNotFoundError when a parent was missing (the script builds nested
    result paths) and was race-prone between the exists() check and mkdir().
    """
    os.makedirs(directory, exist_ok=True)
def moedel_train(train_loader, val_loader, model, parameter, name, savePath):
    '''
    Train `model`, validating after every epoch, and record/plot the history.

    Parameters
    ----------
    train_loader : DataLoader
        Training set loader. Assumes drop_last=True, so
        len(loader) * batch_size is the exact number of samples per epoch.
    val_loader : DataLoader
        Validation set loader (same drop_last assumption).
    model : nn.Module
        Network to train; moved to GPU when available.
    parameter : dict
        Must contain 'batch_size', 'epochs' and 'learn_rate'.
    name : dict
        Must contain 'tfr_method' and 'model'; used for the plot title
        and the saved figure's file name.
    savePath : str
        Directory where the per-epoch CSV and the training figure are written.

    Returns
    -------
    (last_model, best_model, best_accuracy)
        The model after the final epoch, a deep-copied snapshot of the model
        at its best validation accuracy, and that best accuracy.
    '''
    from copy import deepcopy  # local import: needed to snapshot the best model

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    # Hyper-parameters
    batch_size = parameter['batch_size']
    epochs = parameter['epochs']
    # Loss and optimizer. reduction='sum' so dividing the accumulated loss by
    # the sample count below yields a true per-sample mean.
    loss_function = nn.CrossEntropyLoss(reduction='sum')
    optimizer = torch.optim.Adam(model.parameters(), lr=parameter['learn_rate'])
    # Effective sample counts (exact because the loaders use drop_last=True).
    train_size = len(train_loader) * batch_size
    val_size = len(val_loader) * batch_size
    # Best validation accuracy seen so far, plus a detached weight snapshot.
    # FIX: the original stored `best_model = model`, a reference to the module
    # that training kept mutating in place, so "best" silently equalled "last".
    best_accuracy = 0.0
    best_model = deepcopy(model)
    last_model = model

    train_loss = []     # per-epoch mean training loss
    train_acc = []      # per-epoch training accuracy
    validate_acc = []   # per-epoch validation accuracy
    validate_loss = []  # per-epoch mean validation loss
    print('*'*20, '开始训练', '*'*20)
    # Wall-clock timing for the whole run
    start_time = time.time()
    for epoch in range(epochs):
        # Training phase
        model.train()

        print(f"epoch--:{epoch+1}")
        loss_epoch = 0.    # summed loss over this epoch
        correct_epoch = 0  # correctly classified samples this epoch
        for j, (seq, labels) in enumerate(train_loader):
            seq, labels = seq.to(device), labels.to(device)
            # Reset gradients before each update
            optimizer.zero_grad()
            # Forward pass
            y_pred = model(seq)
            # Count correct predictions in this batch
            correct_epoch += torch.sum(y_pred.argmax(dim=1).view(-1) == labels.view(-1)).item()
            # Loss, backward pass and parameter update
            loss = loss_function(y_pred, labels)
            loss_epoch += loss.item()
            loss.backward()
            optimizer.step()

        # (A learning-rate scheduler could be stepped here per epoch.)
        train_Accuracy = correct_epoch / train_size
        train_loss.append(loss_epoch / train_size)
        train_acc.append(train_Accuracy)
        print(f'Epoch: {epoch + 1:2} train_Loss: {loss_epoch / train_size:10.8f} train_Accuracy:{train_Accuracy:4.4f}')
        # Validation phase. FIX: switch to eval mode so dropout/batch-norm
        # behave deterministically (the original never called model.eval()).
        model.eval()
        with torch.no_grad():
            loss_validate = 0.
            correct_validate = 0
            for j, (data, label) in enumerate(val_loader):
                data, label = data.to(device), label.to(device)
                pre = model(data)
                correct_validate += torch.sum(pre.argmax(dim=1).view(-1) == label.view(-1)).item()
                loss = loss_function(pre, label)
                loss_validate += loss.item()

            val_accuracy = correct_validate / val_size
            print(f'Epoch: {epoch + 1:2} val_Loss:{loss_validate / val_size:10.8f},  validate_Acc:{val_accuracy:4.4f}')
            validate_loss.append(loss_validate / val_size)
            validate_acc.append(val_accuracy)
            # Keep a snapshot of the weights whenever validation improves.
            if val_accuracy > best_accuracy:
                best_accuracy = val_accuracy
                best_model = deepcopy(model)  # snapshot, not an alias

    last_model = model
    print('*' * 20, '训练结束', '*' * 20)
    print(f'\nDuration: {time.time() - start_time:.0f} seconds')
    print(f'best_accuracy: {best_accuracy}')

    # Visualization: loss/accuracy curves for both splits on one figure.
    plt.figure(figsize=(14, 7), dpi=100)  # bump dpi to >=300 for publication
    plt.plot(range(epochs), train_loss, color='blue', marker='o', label='Train-loss')
    plt.plot(range(epochs), train_acc, color='green', marker='*', label='Train-accuracy')
    plt.plot(range(epochs), validate_loss, color='red', marker='+', label='Validate_loss')
    plt.plot(range(epochs), validate_acc, color='orange', marker='x', label='Validate_accuracy')

    plt.xlabel('Epochs', fontsize=12)
    plt.ylabel('Loss-Accuracy', fontsize=12)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.legend(fontsize=12)
    plt.title(f"{name['tfr_method']} + {name['model']} model training visualization", fontsize=16)
    # Persist the raw history for later re-plotting.
    # (Renamed from `dict`, which shadowed the builtin.)
    history = {'train_acc': train_acc, 'train_loss': train_loss,
               'validate_acc': validate_acc, 'validate_loss': validate_loss}
    df = pd.DataFrame(history)
    df.to_csv(os.path.join(savePath, 'epochs__result.csv'), index=False, encoding='gbk')

    # Save the training figure, then release it so repeated calls (one per
    # subject) do not accumulate open figures.
    plt.savefig(os.path.join(savePath, f"{name['tfr_method']} + {name['model']} model Train"), dpi=100)
    plt.close()
    return last_model, best_model, best_accuracy


if __name__ == '__main__':
    # Reproducibility: fix the RNG seed before any split/initialization.
    torch.manual_seed(100)

    # Dataset root: one folder per subject, each containing one sub-folder
    # per class of CWT time-frequency images (BCI Competition IV 2a).
    toolboxPath = r'D:\Project_mb\CWTImages\BCI42a'
    subjects = [f'sub0{i+1}' for i in range(9)]
    best_acc = []  # best validation accuracy for each subject
    for i in subjects:
        subject = i
        data_path = os.path.join(toolboxPath, subject)
        data_dir = pathlib.Path(data_path)
        data_paths = list(data_dir.glob('*'))
        # Class names are the leaf folder names. FIX: the original used
        # str(path).split('\\')[1], which always returned the same fixed
        # path component ('Project_mb'), not the class folder.
        classNames = [path.name for path in data_paths]
        data_transform = transforms.Compose([
            transforms.ToTensor(),
            # Standard ImageNet normalization constants
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]
            )
        ])
        total_data = datasets.ImageFolder(data_path, transform=data_transform)

        # 90/10 random train/validation split.
        train_size = int(0.9 * len(total_data))
        val_size = len(total_data) - train_size
        train_dataset, val_dataset = torch.utils.data.random_split(total_data, [train_size, val_size])

        # Training hyper-parameters
        batch_size = 32
        epochs = 200  # reduce (e.g. 50-100) for quicker experiments
        learn_rate = 0.0003
        num_classes = 4  # 4-class motor-imagery task (original comment wrongly said 10)
        parameter = {
            'batch_size': batch_size,
            'output_size': num_classes,
            'epochs': epochs,
            'learn_rate': learn_rate,
            'subject': subject
        }
        # Labels used in plot titles and output file names.
        name = {'tfr_method': 'CWT', 'model': 'MobilenetV2'}

        # Model under training. (SwinCNNGAMModel from Codes.Mymodel is an
        # alternative backbone; see repository history for its parameters.)
        model = MobileNetV2(input_n=3, num_classes=4)
        # Data loaders. NOTE: set num_workers=0 if the machine struggles.
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                                   shuffle=True, pin_memory=True, num_workers=2, drop_last=True)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size,
                                                 shuffle=True, pin_memory=True, num_workers=2, drop_last=True)

        # Persist the validation loader so results can be re-evaluated later.
        SavePath = os.path.join(r'D:\Project_mb\CWTResult\BCI42a\MobilenetV2', subject)
        create_dir_if_not_exists(SavePath)
        dump(val_loader, os.path.join(SavePath, 'val_loader.pkl'))
        last_model, best_model, best_accuracy = moedel_train(train_loader, val_loader, model, parameter, name, SavePath)
        best_acc.append(best_accuracy)
        # Save the final-epoch model ...
        parameters_path = os.path.join(r'D:\Project_mb\modelParameters\BCI42a\MobilenetV2', parameter['subject'])
        create_dir_if_not_exists(parameters_path)
        torch.save(last_model, os.path.join(parameters_path, f"{name['tfr_method']}_final_model.pt"))
        # ... and the best-validation-accuracy model.
        torch.save(best_model, os.path.join(parameters_path, f"{name['tfr_method']}_best_model.pt"))

    # Summary file: one best-accuracy value per subject.
    np.savetxt(os.path.join(r'D:\Project_mb\CWTResult\BCI42a\MobilenetV2', 'best_acc.txt'), best_acc)






