from torchvision.datasets import ImageFolder
from torchvision import transforms
import torch.utils.data as Data
import torch
import matplotlib.pyplot as plt
import numpy as np
from model import GoogLeNet, Inception
from torch import nn
import copy
import time
import pandas as pd


def train_val_data_process():
    """Build the training and validation DataLoaders.

    Loads the images under ``data\\train`` with an ImageFolder, applies a
    resize-to-224 + normalize transform, and randomly splits the dataset
    80/20 into train and validation loaders.

    Returns:
        tuple: (train_dataloader, val_dataloader), each yielding
        (image_batch, label_batch) with batch_size 64.
    """
    ROOT_TRAIN = r"data\train"
    # Per-channel mean/std — presumably computed for this dataset
    # (they are not the usual ImageNet statistics).
    normalize = transforms.Normalize([0.162, 0.151, 0.138], [0.058, 0.052, 0.048])
    train_transforms = transforms.Compose([transforms.Resize((224, 224)),
                                           transforms.ToTensor(),
                                           normalize])

    full_data = ImageFolder(ROOT_TRAIN, transform=train_transforms)

    # Derive the validation length from the training length so the two
    # always sum exactly to len(full_data); two independent round() calls
    # are fragile if the split ratios ever change.
    train_size = round(0.8 * len(full_data))
    train_data, val_data = Data.random_split(full_data,
                                             [train_size, len(full_data) - train_size])

    train_dataloader = Data.DataLoader(dataset=train_data,
                                       batch_size=64,
                                       shuffle=True,
                                       num_workers=0)

    val_dataloader = Data.DataLoader(dataset=val_data,
                                     batch_size=64,
                                     shuffle=True,
                                     num_workers=0)
    return train_dataloader, val_dataloader

def train_model_process(model, train_dataloader, val_dataloader, num_epochs):
    """Train ``model`` and keep the weights with the best validation accuracy.

    Args:
        model: network to train; moved to GPU when one is available.
        train_dataloader: DataLoader yielding (inputs, labels) for training.
        val_dataloader: DataLoader yielding (inputs, labels) for validation.
        num_epochs: number of epochs to run.

    Returns:
        pandas.DataFrame with one row per epoch and columns
        ``epoch``, ``train_loss_all``, ``val_loss_all``,
        ``train_acc_all``, ``val_acc_all``.

    Side effects:
        Loads the best-performing weights back into ``model`` and saves
        them to ``./model/best_model.pth`` (the directory is created if
        missing).
    """
    import os

    # Use the GPU when available, otherwise fall back to the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Adam optimizer with learning rate 0.001.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # Cross-entropy loss for multi-class classification.
    criterion = nn.CrossEntropyLoss()
    model = model.to(device)
    # Snapshot of the best parameters seen so far.
    best_model_wts = copy.deepcopy(model.state_dict())

    best_acc = 0.0
    # Per-epoch history, one entry per epoch.
    train_loss_all = []
    val_loss_all = []
    train_acc_all = []
    val_acc_all = []
    since = time.time()

    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        print("-" * 10)

        # Running sums for this epoch.
        train_loss = 0.0
        train_corrects = 0
        val_loss = 0.0
        val_corrects = 0
        train_num = 0
        val_num = 0

        # ---- training phase ----
        model.train()  # set once per phase, not once per batch
        for step, (b_x, b_y) in enumerate(train_dataloader):
            b_x = b_x.to(device)
            b_y = b_y.to(device)
            output = model(b_x)
            # Index of the max logit per row = predicted class.
            pre_lab = torch.argmax(output, dim=1)
            loss = criterion(output, b_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # loss.item() is the batch-mean loss; scale by the batch size
            # to accumulate a per-sample sum.
            train_loss += loss.item() * b_x.size(0)
            train_corrects += torch.sum(pre_lab == b_y.data)
            train_num += b_x.size(0)

        # ---- validation phase ----
        model.eval()
        # No backprop during validation: torch.no_grad() skips building the
        # autograd graph, saving memory and compute.
        with torch.no_grad():
            for step, (b_x, b_y) in enumerate(val_dataloader):
                b_x = b_x.to(device)
                b_y = b_y.to(device)
                output = model(b_x)
                pre_lab = torch.argmax(output, dim=1)
                loss = criterion(output, b_y)
                val_loss += loss.item() * b_x.size(0)
                val_corrects += torch.sum(pre_lab == b_y.data)
                val_num += b_x.size(0)

        # Record the per-sample average loss and accuracy for this epoch.
        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_corrects.double().item() / train_num)
        val_loss_all.append(val_loss / val_num)
        val_acc_all.append(val_corrects.double().item() / val_num)
        print("{} Train Loss: {:.4f} Train Acc: {:.4f}".format(epoch, train_loss_all[-1], train_acc_all[-1]))
        print("{} Val Loss: {:.4f} Val Acc: {:.4f}".format(epoch, val_loss_all[-1], val_acc_all[-1]))

        # Track the weights with the highest validation accuracy.
        if val_acc_all[-1] > best_acc:
            best_acc = val_acc_all[-1]
            best_model_wts = copy.deepcopy(model.state_dict())
        time_use = time.time() - since
        print("训练和验证耗费时间：{:.0f}m {:.0f}s".format(time_use // 60, time_use % 60))

    # Restore the best weights into the model (the original comment intended
    # this but the load was missing) and persist them.
    model.load_state_dict(best_model_wts)
    os.makedirs("./model", exist_ok=True)
    torch.save(best_model_wts, "./model/best_model.pth")

    train_process = pd.DataFrame(data={"epoch": range(num_epochs),
                                       "train_loss_all": train_loss_all,
                                       "val_loss_all": val_loss_all,
                                       "train_acc_all": train_acc_all,
                                       "val_acc_all": val_acc_all})
    return train_process


def matplot_acc_loss(train_process):
    """Plot training/validation loss and accuracy curves side by side.

    Args:
        train_process: DataFrame as returned by ``train_model_process``,
            with columns ``epoch``, ``train_loss_all``, ``val_loss_all``,
            ``train_acc_all`` and ``val_acc_all``.
    """
    plt.figure(figsize=(12, 4))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(train_process["epoch"], train_process.train_loss_all, "ro-", label="train loss")
    plt.plot(train_process["epoch"], train_process.val_loss_all, "bs-", label="val loss")
    plt.legend()
    plt.xlabel("epoch")
    plt.ylabel("loss")

    # Right panel: accuracy curves.
    plt.subplot(1, 2, 2)
    plt.plot(train_process["epoch"], train_process.train_acc_all, "ro-", label="train acc")
    plt.plot(train_process["epoch"], train_process.val_acc_all, "bs-", label="val acc")
    plt.legend()
    plt.xlabel("epoch")
    plt.ylabel("acc")

    # NOTE: the original called plt.legend() a second time here; each
    # subplot already has its legend, so the duplicate call was removed.
    plt.show()


if __name__ == "__main__":
    # Instantiate the network to be trained.
    net = GoogLeNet(Inception)
    # Build the train/validation data pipelines.
    train_loader, val_loader = train_val_data_process()
    # Train for 50 epochs and collect the per-epoch metrics.
    history = train_model_process(net, train_loader, val_loader, 50)
    # Visualize the loss and accuracy curves.
    matplot_acc_loss(history)
