import copy
import time
import torch
from torchvision.datasets import FashionMNIST
from torchvision import transforms
import torch.utils.data as Data
import os
import numpy as np
import matplotlib.pyplot as plt
from model import LeNet
import torch.nn as nn
import pandas as pd


def train_val_data_process():
    """Download the FashionMNIST training split and return an 80/20
    train/validation pair of DataLoaders.

    Returns:
        (train_dataloader, val_dataloader): batch sizes 512 (shuffled)
        and 1024 (unshuffled), both pinned for faster host-to-GPU copies.
    """
    tfm = transforms.Compose([transforms.Resize(size=28), transforms.ToTensor()])
    full_train = FashionMNIST(root='./data',
                              train=True,
                              transform=tfm,
                              download=True)

    # Hold out 20% of the training set for validation.
    n_val = int(0.2 * len(full_train))
    n_train = len(full_train) - n_val
    train_subset, val_subset = Data.random_split(full_train, [n_train, n_val])

    train_loader = Data.DataLoader(dataset=train_subset,
                                   batch_size=512,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = Data.DataLoader(dataset=val_subset,
                                 batch_size=1024,
                                 shuffle=False,
                                 pin_memory=True)
    return train_loader, val_loader

def trainer(model, train_dataloader, val_dataloader, num_epochs):
    """Train `model` with Adam (lr=1e-3) and cross-entropy loss.

    For each epoch runs one training pass and one validation pass,
    tracking loss and accuracy on both splits. The weights with the best
    validation accuracy are saved to ./models/best_model.pth.

    Args:
        model: a torch.nn.Module classifier; moved to CUDA if available.
        train_dataloader: DataLoader yielding (inputs, integer labels).
        val_dataloader: DataLoader for the validation split.
        num_epochs: number of full passes over the training data.

    Returns:
        pandas.DataFrame with columns epoch, train_loss_all, val_loss_all,
        train_acc_all, val_acc_all (one row per epoch).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    model = model.to(device)
    # makedirs(exist_ok=True) avoids the isdir/mkdir check-then-act race.
    os.makedirs('./models', exist_ok=True)
    best_model_wts = copy.deepcopy(model.state_dict())

    best_acc = 0.0
    train_loss_all = []
    val_loss_all = []
    train_acc_all = []
    val_acc_all = []
    since = time.time()

    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch + 1, num_epochs))
        print("-" * 10)

        train_loss = 0.0
        train_corrects = 0
        val_loss = 0.0
        val_corrects = 0
        train_num = 0
        val_num = 0

        # ---- training phase ----
        model.train()
        for x, y in train_dataloader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            output = model(x)
            loss = criterion(output, y)
            loss.backward()
            optimizer.step()
            pre_lab = torch.argmax(output, dim=1)
            # Weight the mean batch loss by batch size so the epoch average
            # is unbiased even when the last batch is smaller.
            train_loss += loss.item() * x.size(0)
            # Summing the boolean tensor counts correct predictions.
            train_corrects += (pre_lab == y).sum().item()
            train_num += x.size(0)

        # ---- validation phase (no gradients needed) ----
        model.eval()
        with torch.no_grad():
            for x, y in val_dataloader:
                x, y = x.to(device), y.to(device)
                output = model(x)
                pre_lab = torch.argmax(output, dim=1)
                loss = criterion(output, y)
                val_loss += loss.item() * x.size(0)
                val_corrects += (pre_lab == y).sum().item()
                val_num += x.size(0)

        train_loss_all.append(train_loss / train_num)
        train_acc_all.append(train_corrects / train_num)
        val_loss_all.append(val_loss / val_num)
        val_acc_all.append(val_corrects / val_num)

        # epoch + 1 keeps these lines consistent with the epoch header above.
        print(f"{epoch + 1} train loss: {train_loss_all[-1]:.4f} train acc: {train_acc_all[-1]:.4f}")
        print(f"{epoch + 1} val   loss: {val_loss_all[-1]:.4f} val   acc: {val_acc_all[-1]:.4f}")

        # Keep a snapshot of the weights with the best validation accuracy.
        if val_acc_all[-1] > best_acc:
            best_acc = val_acc_all[-1]
            best_model_wts = copy.deepcopy(model.state_dict())

        time_use = time.time() - since
        print("训练和验证耗费的时间{:.0f}m{:.0f}s".format(time_use // 60, time_use % 60))

    # Persist the best-performing weights, not necessarily the final ones.
    torch.save(best_model_wts, "./models/best_model.pth")

    train_process = pd.DataFrame(data={"epoch": range(num_epochs),
                                       "train_loss_all": train_loss_all,
                                       "val_loss_all": val_loss_all,
                                       "train_acc_all": train_acc_all,
                                       "val_acc_all": val_acc_all, })

    return train_process


def matplot_acc_loss(train_process):
    """Show loss (left) and accuracy (right) curves for train vs. validation.

    Args:
        train_process: DataFrame from `trainer` with columns epoch,
            train_loss_all, val_loss_all, train_acc_all, val_acc_all.
    """
    epochs = train_process['epoch']
    # (subplot index, y-label, [(column, line style, legend label), ...])
    panels = [
        (1, "Loss", [("train_loss_all", "ro-", "Train loss"),
                     ("val_loss_all", "bs-", "Val loss")]),
        (2, "acc", [("train_acc_all", "ro-", "Train acc"),
                    ("val_acc_all", "bs-", "Val acc")]),
    ]

    plt.figure(figsize=(12, 4))
    for idx, ylab, series in panels:
        plt.subplot(1, 2, idx)
        for column, fmt, label in series:
            plt.plot(epochs, train_process[column], fmt, label=label)
        plt.xlabel("epoch")
        plt.ylabel(ylab)
        plt.legend()
    plt.show()


if __name__ == '__main__':
    # Use a distinct variable name: `LeNet = LeNet()` would shadow the
    # imported class with its instance, making re-instantiation impossible.
    model = LeNet()
    train_loader, val_loader = train_val_data_process()
    history = trainer(model, train_loader, val_loader, num_epochs=80)
    matplot_acc_loss(history)