import torch
import os
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from loguru import logger

from model.LeNet import LeNet5
from config import cfg


def save_model(epoch, model_state, optimizer_state, loss, save_path):
    """Persist a training checkpoint to `save_path`.

    Args:
        epoch: epoch index at which the checkpoint was taken.
        model_state: ``model.state_dict()`` mapping.
        optimizer_state: ``optimizer.state_dict()`` mapping.
        loss: last recorded training loss (already rounded by the caller).
        save_path: destination file path for ``torch.save``.
    """
    # torch.save does not create missing directories; make sure the
    # checkpoint directory exists (dirname is '' for bare filenames).
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    torch.save({
        'epoch': epoch,
        'model_state_dict': model_state,
        'optimizer_state_dict': optimizer_state,
        'loss': loss,
    }, save_path)


def load_dataset():
    """Download (if necessary) and return the MNIST splits.

    Returns:
        (train_split, test_split) — torchvision ``MNIST`` datasets whose
        images are converted to tensors via ``transforms.ToTensor()``.
    """
    to_tensor = transforms.ToTensor()
    train_split = MNIST(root=cfg.dataset_dir,
                        train=True,
                        download=True,
                        transform=to_tensor)
    test_split = MNIST(root=cfg.dataset_dir,
                       train=False,
                       download=True,
                       transform=to_tensor)
    return train_split, test_split


def train(mnist_train, mnist_test):
    """Train LeNet-5 on MNIST with SGD, logging and periodic checkpointing.

    Args:
        mnist_train: training dataset yielding (image, label) pairs.
        mnist_test: held-out dataset used for per-epoch accuracy reporting.

    Returns:
        The trained model.
    """
    batch_size = cfg.batch_size
    learning_rate = cfg.learning_rate
    epochs = cfg.epochs
    train_info_per_batch = cfg.train_info_per_batch
    save_model_per_epoch = cfg.model_save_per_epoch

    model = LeNet5()
    train_iter = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
    # Evaluation accuracy is order-independent; no need to shuffle the test set.
    test_iter = DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
    # Define the optimizer (plain SGD).
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    losses = []
    last_loss = 0.0  # keeps the checkpoint call valid even if the loader is empty
    for epoch in range(epochs):
        # evaluate() restores train mode, but set it explicitly each epoch.
        model.train()
        for i, (x, y) in enumerate(train_iter):
            # LeNet5's forward appears to return (loss, logits) when given
            # labels — confirm against model/LeNet.py.
            loss, logits = model(x, y)
            optimizer.zero_grad()
            loss.backward()
            # Apply the gradient-descent step.
            optimizer.step()
            last_loss = round(loss.item(), 4)
            losses.append(last_loss)
            if i % train_info_per_batch == 0:
                acc = (logits.argmax(1) == y).float().mean()
                logger.debug(f"Epochs[{epoch + 1}/{epochs}]--batch[{i}/{len(train_iter)}]"
                             f"--Acc: {round(acc.item(), 4)}--loss: {last_loss}")
        logger.debug(f"Epochs[{epoch + 1}/{epochs}]--Acc on test {evaluate(test_iter, model)}")
        if epoch % save_model_per_epoch == 0:
            save_model(epoch=epoch,
                       model_state=model.state_dict(),
                       optimizer_state=optimizer.state_dict(),
                       loss=last_loss,
                       save_path=cfg.model_save_path)
    logger.debug(losses)

    return model


def evaluate(data_iter, model):
    """Compute classification accuracy of `model` over `data_iter`.

    Args:
        data_iter: iterable of (inputs, labels) batches.
        model: module whose forward returns per-class logits for the inputs.

    Returns:
        Fraction of correctly classified samples, or 0.0 if the iterator
        yields no samples (avoids ZeroDivisionError).
    """
    # Remember the caller's mode so we restore it instead of forcing train().
    was_training = model.training
    model.eval()
    correct, total = 0.0, 0
    try:
        with torch.no_grad():
            for x, y in data_iter:
                logits = model(x)
                correct += (logits.argmax(1) == y).float().sum().item()
                total += len(y)
    finally:
        # Restore the original mode even if iteration raises.
        if was_training:
            model.train()
    return correct / total if total else 0.0


def inference(model, mnist_test):
    """Predict the first five MNIST test images and print true vs. predicted labels.

    Args:
        model: trained classifier returning logits.
        mnist_test: MNIST dataset exposing raw ``.data`` (uint8) and ``.targets``.

    NOTE: training feeds the model through ``transforms.ToTensor()``, which
    scales pixels to [0, 1]; ``mnist_test.data`` is raw uint8 in [0, 255],
    so the same scaling must be applied here or the model sees inputs far
    outside its training distribution.
    """
    model.eval()
    y_true = mnist_test.targets[:5]
    # Add the channel dimension and rescale to [0, 1] to mirror ToTensor().
    batch = mnist_test.data[:5].unsqueeze(1).to(torch.float32) / 255.0
    with torch.no_grad():
        logits = model(batch)
    y_pred = logits.argmax(1)
    print(f"真实标签为：{y_true}")
    print(f"预测标签为：{y_pred}")


if __name__ == '__main__':
    # Train LeNet-5 on MNIST, then show predictions for a few test images.
    train_set, test_set = load_dataset()
    trained_model = train(train_set, test_set)
    inference(trained_model, test_set)
