import os
import argparse
import torch
import torch.nn as nn

from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
from lenet.config import DEFAULT_TRANSFORMS
from lenet.model import LeNet


def train(args):
    """Train LeNet on MNIST and periodically save checkpoints.

    Args:
        args: parsed CLI namespace; reads device, epochs, start_epoch,
            batch_size, num_workers, learning_rate, display_interval,
            save_interval and output.
    """
    # create output directory for checkpoints
    os.makedirs(args.output, exist_ok=True)

    # loading the dataset and preprocessing
    train_dataset = MNIST(
        root="./data",
        train=True,
        transform=DEFAULT_TRANSFORMS,
        download=True,
    )
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=True,
    )

    # create model, loss function and optimizer
    model = LeNet().to(args.device)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    # make training mode explicit (dropout/batchnorm behave differently in eval)
    model.train()

    total_step = len(train_loader)
    # honor --start-epoch (it was parsed but previously ignored); default is 1,
    # so existing invocations behave exactly as before
    for epoch in range(args.start_epoch, args.epochs + 1):
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(args.device)
            labels = labels.to(args.device)

            # Forward pass
            outputs = model(images)
            loss = loss_fn(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % args.display_interval == 0:
                # BUG FIX: the loop is already 1-based, so print `epoch`
                # directly — `epoch + 1` over-reported the epoch number.
                print(
                    "Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}".format(
                        epoch, args.epochs, i + 1, total_step, loss.item()
                    )
                )

        if epoch % args.save_interval == 0:
            checkpoint_path = os.path.join(args.output, f"lenet_ckpt_{epoch}.pth")
            print(f"---- Saving checkpoint to: '{checkpoint_path}' ----")
            torch.save(model.state_dict(), checkpoint_path)


def parse_args(argv=None):
    """Parse command-line arguments for LeNet training.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward compatible — existing callers pass nothing).

    Returns:
        argparse.Namespace holding the training configuration.
    """
    parser = argparse.ArgumentParser(description="LeNet training")

    # fmt: off
    parser.add_argument('--device', default='cuda:0', type=str, help='torch device string, e.g. cuda:0 or cpu')
    parser.add_argument('--epochs', default=100, type=int, help='number of epochs to train')
    parser.add_argument('--batch-size', default=64, type=int, help='batch size for data loader')
    parser.add_argument('--num-workers', default=8, type=int, help='number of workers for data loader')
    # BUG FIX: help text was a copy-paste of --num-workers'
    parser.add_argument('--learning-rate', default=1e-3, type=float, help='learning rate for the optimizer')
    parser.add_argument('--display-interval', default=10, type=int, help='print loss every N steps')
    parser.add_argument('--save-interval', default=20, type=int, help='save a checkpoint every N epochs')
    parser.add_argument('--start-epoch', default=1, type=int, help='epoch number to start training from')
    # BUG FIX: `type=bool` treats any non-empty string as True ("--resume False"
    # yielded True). BooleanOptionalAction provides --resume / --no-resume
    # flags with a real boolean value; the default stays True.
    parser.add_argument('--resume', default=True, action=argparse.BooleanOptionalAction, help='resume from a checkpoint')
    parser.add_argument("--output", default="output", type=str, help="directory for output")
    # fmt: on

    args = parser.parse_args(argv)
    return args


if __name__ == "__main__":
    # CLI entry point: parse options, then run the training loop.
    train(parse_args())
