import torch
import torch.nn as nn
import data
import model


def train(train_loader, test_loader, model, learning_rate, num_epochs):
    """Train *model* on *train_loader*, evaluate on *test_loader*, and save a checkpoint.

    Args:
        train_loader: iterable of (images, labels) batches used for training.
        test_loader: iterable of (images, labels) batches used for evaluation.
        model: the nn.Module to optimize (note: name shadows the `model` module
            import at file top; kept for backward compatibility with callers).
        learning_rate: Adam learning rate.
        num_epochs: number of passes over the training data.

    Returns:
        Test accuracy in percent (float). Callers that ignored the previous
        ``None`` return are unaffected.

    Side effects:
        Writes ``model_info.ckpt`` in the current directory containing the
        model and optimizer state dicts plus the total iteration count.
    """
    # loss and optimization
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Ensure training-mode behavior (dropout/batchnorm) even if the caller
    # passed a model previously switched to eval().
    model.train()

    # training
    total_step = len(train_loader)
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(train_loader):
            # forward
            outputs = model(images)
            loss = criterion(outputs, labels)

            # backward and optimization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % 100 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

    # evaluation and test
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            outputs = model(images)
            # `outputs` directly (not deprecated `.data`); grads are already
            # disabled by the no_grad context.
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        accuracy = 100 * correct / total
        print('Test Accuracy of the model on the test images: {} %'.format(accuracy))

    # save model
    # Save state_dicts, not live objects: checkpoints stay loadable across
    # code versions and are much smaller than pickling the optimizer itself.
    model_info = {
        "iter_num": total_step * num_epochs,
        "optimizer": optimizer.state_dict(),
        "model": model.state_dict()
    }
    torch.save(model_info, 'model_info.ckpt')

    return accuracy


if __name__ == '__main__':
    # hyper parameters
    num_epochs = 30
    num_classes = 10
    batch_size = 32
    learning_rate = 0.01

    train_loader, test_loader = data.get_cifar_data(batch_size)

    # Use a distinct name: assigning to `model` here would shadow the
    # imported `model` module and make it unreachable afterwards.
    net = model.ConvNet(num_classes)

    train(train_loader, test_loader, net, learning_rate, num_epochs)
