import copy
import time
import pandas as pd
import torch.optim
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import FashionMNIST
import matplotlib.pyplot as plt
from p01_LeNet import LeNet
from common.logging_format import *


def train(model: nn.Module, epoch: int, train_loader, test_loader, loss_fn, optim):
    """Train ``model`` and evaluate it on the test set after every epoch.

    The epoch with the highest test accuracy is remembered; its weights are
    restored into ``model`` and saved to ``le_net_model.pth`` at the end.

    :param model: network to train (its parameters are updated in place)
    :param epoch: total number of epochs to run
    :param train_loader: DataLoader yielding (input, label) training batches
    :param test_loader: DataLoader yielding (input, label) test batches
    :param loss_fn: criterion returning the mean loss of a batch
    :param optim: optimizer bound to ``model.parameters()``
    :return: ``pd.DataFrame`` with columns epoch / train_loss_all /
        train_acc_all / test_loss_all / test_acc_all (one row per epoch)
    """
    total_train_step = 0
    logger.info('训练开始。。。')
    # Per-epoch averages: train loss, train accuracy, test loss, test accuracy.
    train_loss_all = []
    train_acc_all = []
    test_loss_all = []
    test_acc_all = []
    # Best epoch index, best test accuracy and a deep copy of the best weights,
    # used to restore and save the best model at the end.
    best_epoch = 0
    best_acc = 0.0
    best_state_dict = None

    # Dataset sizes for accuracy normalization. The original read the
    # module-level globals train_dataset/test_dataset; taking the sizes from
    # the loaders keeps the function self-contained.
    n_train = len(train_loader.dataset)
    n_test = len(test_loader.dataset)

    for i in range(1, epoch + 1):
        logger.info('---------------第{}轮训练---------------'.format(i))
        train_loss_epoch = 0.0
        train_acc_epoch = 0.0
        time_since = time.time()  # per-epoch wall-clock timing
        model.train()
        for input_, label in train_loader:
            output = model(input_)
            loss = loss_fn(output, label)

            # loss is the MEAN over the batch, so the epoch total is divided
            # by the number of batches below, not by the number of samples.
            train_loss_epoch += loss.item()
            train_acc_epoch += (output.argmax(1) == label).sum().item()
            optim.zero_grad()
            loss.backward()
            optim.step()
            if total_train_step % 100 == 0:
                logger.info('epoch:[{:2d}/{}], step:{:5d}, loss:{:.5f}'.format(i, epoch, total_train_step, loss.item()))
            total_train_step += 1
        # Batch-mean losses average over the loader length; accuracies are
        # counts of correct samples, so they divide by the dataset size.
        train_loss_all.append(train_loss_epoch / len(train_loader))
        train_acc_all.append(train_acc_epoch / n_train)

        test_loss_epoch = 0.0
        test_acc_epoch = 0.0
        model.eval()
        with torch.no_grad():
            for test_data, test_label in test_loader:
                test_output = model(test_data)
                test_loss_epoch += loss_fn(test_output, test_label).item()
                # .item() converts the 0-d tensor from sum() to a Python number
                # so matplotlib/pandas can handle the accumulated values.
                test_acc_epoch += (test_output.argmax(1) == test_label).sum().item()

        test_loss_all.append(test_loss_epoch / len(test_loader))
        test_acc_all.append(test_acc_epoch / n_test)
        time_cost = time.time() - time_since
        # Keep a snapshot of the weights from the best-performing epoch.
        if test_acc_all[-1] > best_acc:
            best_epoch = i
            best_acc = test_acc_all[-1]
            best_state_dict = copy.deepcopy(model.state_dict())

        logger.info('Epoch test Loss: {:.5f}'.format(test_loss_all[-1]))
        logger.info('Epoch test accuracy: {:.5f}'.format(test_acc_all[-1]))
        logger.info('第{}轮训练和验证结束，耗时{:.2f}s'.format(i, time_cost))
    logger.info('全部结束, 最佳轮次{}, 最佳准确率{:.5f}, 保存中... '.format(best_epoch, best_acc))
    # BUG FIX: the original did torch.save(model.load_state_dict(...), path),
    # which saves load_state_dict()'s return value (an _IncompatibleKeys
    # namedtuple) instead of the weights. Restore the best weights into the
    # model, then save the state dict itself. Guarded so an empty run
    # (epoch == 0 / no improvement) does not crash on a None state dict.
    if best_state_dict is not None:
        model.load_state_dict(best_state_dict)
        torch.save(best_state_dict, 'le_net_model.pth')

    # Collect the per-epoch metrics into one table for plotting.
    train_process = pd.DataFrame(data={
        'epoch': range(1, epoch + 1),
        'train_loss_all': train_loss_all,
        'train_acc_all': train_acc_all,
        'test_loss_all': test_loss_all,
        'test_acc_all': test_acc_all,
    })
    return train_process


def matplot_acc_loss(train_process: pd.DataFrame):
    """Plot train/test loss and accuracy curves side by side.

    Saves the figure to ``training_plot.png`` and shows it on screen.

    :param train_process: per-epoch metrics DataFrame as returned by train()
    """
    epochs = train_process['epoch']
    plt.figure(figsize=(8, 4))

    # Left panel: loss curves for both splits.
    plt.subplot(1, 2, 1)
    plt.plot(epochs, train_process['train_loss_all'], 'ro-', label='train_loss')
    plt.plot(epochs, train_process['test_loss_all'], 'bs-', label='test_loss')
    plt.legend()
    plt.xlabel('epoch')
    plt.ylabel('loss')

    # Right panel: accuracy curves for both splits.
    plt.subplot(1, 2, 2)
    plt.plot(epochs, train_process['train_acc_all'], 'ro-', label='train_acc')
    plt.plot(epochs, train_process['test_acc_all'], 'bs-', label='test_acc')
    plt.legend()
    plt.xlabel('epoch')
    plt.ylabel('acc')

    plt.savefig('training_plot.png', dpi=120)
    plt.show()


if __name__ == '__main__':
    # Module-level logger; train() also references this name as a global.
    logger = Logger("train_log.log", "train_log")

    # Resize to 28x28 and convert PIL images to float tensors in [0, 1].
    transform = transforms.Compose(
        [transforms.Resize(size=28), transforms.ToTensor()]
    )

    train_dataset = FashionMNIST(root='../data/',
                                 train=True,
                                 transform=transform,
                                 download=False)
    test_dataset = FashionMNIST(root='../data/',
                                train=False,
                                transform=transform,
                                download=False)

    train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=64, shuffle=True)

    logger.info("训练集数据长度:{}".format(len(train_dataset)))
    logger.info("测试集数据长度:{}".format(len(test_dataset)))

    # Model, loss and optimizer.
    model = LeNet()
    # Cross-entropy loss, the usual criterion for multi-class classification.
    loss_fn = nn.CrossEntropyLoss()
    # Adam over the model's parameters with a fixed learning rate.
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)

    history = train(model, 10, train_loader, test_loader, loss_fn, optim)
    matplot_acc_loss(history)
