import copy
import time
import torch
import torch.nn as nn
from torchvision.datasets import FashionMNIST
from torchvision import transforms
import torch.utils.data as Data
import pandas as pd
import matplotlib.pyplot as plt
from model import LeNet


def train_val_data_process(data_root='D:/Train/data', batch_size=32, num_workers=2):
    """Download FashionMNIST, split it 80/20 into train/validation, and wrap both in DataLoaders.

    Args:
        data_root: directory where the dataset is stored / downloaded to.
        batch_size: samples per mini-batch for both loaders.
        num_workers: worker subprocesses used by each DataLoader.

    Returns:
        (train_loader, val_loader): shuffled DataLoader objects for the two splits.
    """
    full_data = FashionMNIST(root=data_root,
                             train=True,
                             transform=transforms.Compose([transforms.Resize(size=28),
                                                           transforms.ToTensor()]),
                             download=True)

    # Compute the validation size as the remainder so the two parts always sum to
    # len(full_data). (round(0.8*n) + round(0.2*n) can mismatch n for some sizes,
    # which makes random_split raise.)
    train_size = round(0.8 * len(full_data))
    val_size = len(full_data) - train_size
    train_data, val_data = Data.random_split(full_data, [train_size, val_size])

    # Batch the two subsets (default: groups of 32).
    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=batch_size,
                                   shuffle=True,
                                   num_workers=num_workers)
    val_loader = Data.DataLoader(dataset=val_data,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=num_workers)
    return train_loader, val_loader
# NOTE: an earlier draft of train_model_process used to live here, disabled inside a
# module-level string literal. It was buggy (it accumulated validation loss/accuracy
# into the training counters, and passed the return value of load_state_dict to
# torch.save). Removed as dead code; version control keeps the history.
def train_model_process(model, train_dataloader, val_dataloader, num_epochs,
                        save_path='D:/code/Python/pytorch-le-net-5/module/best_model.pth',
                        lr=0.01):
    """Train *model* for *num_epochs*, validating after each epoch, and keep the best weights.

    The weights with the highest validation accuracy are restored into *model* at the
    end and also saved to *save_path*.

    Args:
        model: the nn.Module to train (moved to GPU if available).
        train_dataloader: DataLoader yielding (inputs, labels) training batches.
        val_dataloader: DataLoader yielding (inputs, labels) validation batches.
        num_epochs: number of full passes over the training data.
        save_path: file path for the best state_dict (default kept for backward
            compatibility with the original hard-coded location).
        lr: Adam learning rate.

    Returns:
        pandas.DataFrame with per-epoch columns: epoch, train_loss_all, val_loss_all,
        train_acc_all, val_acc_all.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    model = model.to(device)
    # Snapshot the initial weights; updated whenever validation accuracy improves.
    best_model_wts = copy.deepcopy(model.state_dict())

    best_acc = 0.0
    train_loss_all = []
    val_loss_all = []
    train_acc_all = []
    val_acc_all = []
    since = time.time()

    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        print("-" * 10)

        # Per-epoch running sums (losses weighted by batch size, corrects as counts).
        train_loss = 0.0
        train_corrects = 0
        val_loss = 0.0
        val_corrects = 0
        train_num = 0
        val_num = 0

        # ---- Training phase ----
        model.train()
        for b_x, b_y in train_dataloader:
            b_x = b_x.to(device)
            b_y = b_y.to(device)

            output = model(b_x)
            pre_lab = torch.argmax(output, dim=1)
            loss = criterion(output, b_y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Weight the batch-mean loss by batch size so the epoch average is exact.
            train_loss += loss.item() * b_x.size(0)
            train_corrects += torch.sum(pre_lab == b_y).item()
            train_num += b_x.size(0)

        # ---- Validation phase (no gradients) ----
        model.eval()
        with torch.no_grad():
            for b_x, b_y in val_dataloader:
                b_x = b_x.to(device)
                b_y = b_y.to(device)

                output = model(b_x)
                pre_lab = torch.argmax(output, dim=1)
                loss = criterion(output, b_y)

                val_loss += loss.item() * b_x.size(0)
                val_corrects += torch.sum(pre_lab == b_y).item()
                val_num += b_x.size(0)

        # Per-epoch averages.
        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_corrects / train_num)
        if val_num > 0:  # guard against an empty validation loader
            val_loss_all.append(val_loss / val_num)
            val_acc_all.append(val_corrects / val_num)
        else:
            val_loss_all.append(float('inf'))
            val_acc_all.append(0)

        print('{} Train Loss: {:.4f} Train Acc: {:.4f}'.format(epoch, train_loss_all[-1], train_acc_all[-1]))
        print('{} Val Loss: {:.4f} Val Acc: {:.4f}'.format(epoch, val_loss_all[-1], val_acc_all[-1]))

        # Track the weights with the best validation accuracy so far.
        if val_acc_all[-1] > best_acc:
            best_acc = val_acc_all[-1]
            best_model_wts = copy.deepcopy(model.state_dict())

        time_use = time.time() - since
        print("训练耗费时间：{:.0f}m{:.0f}s".format(time_use // 60, time_use % 60))

    # Restore and persist the best weights.
    model.load_state_dict(best_model_wts)
    torch.save(best_model_wts, save_path)

    train_process = pd.DataFrame(data={"epoch": range(num_epochs),
                                       "train_loss_all": train_loss_all,
                                       "val_loss_all": val_loss_all,
                                       "train_acc_all": train_acc_all,
                                       "val_acc_all": val_acc_all})
    return train_process
def matplot_acc_loss(train_process):
    """Plot per-epoch loss (left subplot) and accuracy (right subplot) curves.

    Args:
        train_process: DataFrame returned by train_model_process, with columns
            epoch, train_loss_all, val_loss_all, train_acc_all, val_acc_all.
    """
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(train_process["epoch"], train_process.train_loss_all, 'ro-', label="train loss")
    plt.plot(train_process["epoch"], train_process.val_loss_all, 'bs-', label="val loss")
    plt.legend()
    plt.xlabel("epoch")
    plt.ylabel("loss")

    plt.subplot(1, 2, 2)
    # Fixed copy-paste bug: these curves are accuracies, but were labeled as losses.
    plt.plot(train_process["epoch"], train_process.train_acc_all, 'ro-', label="train acc")
    plt.plot(train_process["epoch"], train_process.val_acc_all, 'bs-', label="val acc")
    plt.xlabel("epoch")
    plt.ylabel("acc")
    plt.legend()
    plt.show()


if __name__ == "__main__":
    # Instantiate the network under a distinct name so the LeNet class itself is
    # not shadowed by its instance (the original did `LeNet = LeNet()`).
    model = LeNet()
    train_dataloader, val_dataloader = train_val_data_process()
    train_process = train_model_process(model, train_dataloader, val_dataloader, 30)
    matplot_acc_loss(train_process)