import torch
from ignite.utils import setup_logger
from torch import nn, optim
from torch.optim import SGD
from tqdm import tqdm

import cnn_model
import data
from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events
from ignite.contrib.handlers import FastaiLRFinder, ProgressBar
from ignite.metrics import Accuracy, Loss

if __name__ == '__main__':
    # Hyperparameters for the LR-range test + final training run.
    batch_size = 256
    max_epochs = 5

    train_loader, test_loader = data.get_data_loaders('dogvscat', batch_size)
    model = cnn_model.Net()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    criterion = nn.NLLLoss()
    model.to(device)  # Move model before creating optimizer so params live on `device`
    optimizer = optim.SGD(model.parameters(), lr=3e-4, momentum=0.9)

    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
    ProgressBar(persist=True).attach(trainer, output_transform=lambda x: {"batch loss": x})

    # Run the FastAI-style LR range test. `to_save` holds the initial
    # model/optimizer state; the context manager restores it on exit, so the
    # exploratory run does not pollute the real training below.
    lr_finder = FastaiLRFinder()
    to_save = {'model': model, 'optimizer': optimizer}
    with lr_finder.attach(trainer, to_save, diverge_th=1.5) as trainer_with_lr_finder:
        trainer_with_lr_finder.run(train_loader)

    evaluator = create_supervised_evaluator(
        model,
        metrics={"acc": Accuracy(), "loss": Loss(nn.NLLLoss())},
        device=device,
    )

    # Adopt the learning rate suggested by the finder, then train for real.
    lr_finder.apply_suggested_lr(optimizer)
    print(optimizer.param_groups[0]['lr'])
    trainer.run(train_loader, max_epochs=max_epochs)

    # Fix: evaluate on the held-out test set (previously ran on train_loader,
    # which reported training-set metrics and left test_loader unused).
    evaluator.run(test_loader)
    print(evaluator.state.metrics)
