from collections import OrderedDict

import torch
from ignite.utils import setup_logger
from torch import nn, optim
from torch.optim import SGD
from tqdm import tqdm

import cnn_model
import data
from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events
from ignite.contrib.handlers import FastaiLRFinder, ProgressBar
from ignite.metrics import Accuracy, Loss
import torchvision.models as models

if __name__ == '__main__':
    # Transfer-learn a pretrained ResNet-18 on the dogs-vs-cats dataset:
    # 1) swap in a 2-class log-softmax head, 2) run the FastAI LR-range test
    # to pick a learning rate, 3) train for 5 epochs, 4) evaluate on the
    # held-out test split.
    train_loader, test_loader = data.get_data_loaders('dogvscat', 64)

    model = models.resnet18(pretrained=True)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NLLLoss pairs with the LogSoftmax output layer of the new head below.
    criterion = nn.NLLLoss()

    # Replace the final fully-connected layer (512 features on resnet18)
    # with a small 2-class classification head.
    fc = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(512, 100)),
        ('relu', nn.ReLU()),
        ('fc2', nn.Linear(100, 2)),
        ('output', nn.LogSoftmax(dim=1))
    ]))

    model.fc = fc
    model.to(device)  # Move model before creating optimizer
    optimizer = optim.Adam(model.parameters(), lr=3e-4)

    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
    ProgressBar(persist=True).attach(trainer, output_transform=lambda x: {"batch loss": x})

    # LR-range test: `to_save` lets the finder restore the model/optimizer
    # state after its exploratory run, so real training starts fresh.
    lr_finder = FastaiLRFinder()
    to_save = {'model': model, 'optimizer': optimizer}
    with lr_finder.attach(trainer, to_save, diverge_th=1.5) as trainer_with_lr_finder:
        trainer_with_lr_finder.run(train_loader)

    # Reuse `criterion` for the Loss metric instead of building a second NLLLoss.
    evaluator = create_supervised_evaluator(model, metrics={"acc": Accuracy(), "loss": Loss(criterion)},
                                            device=device)

    lr_finder.apply_suggested_lr(optimizer)
    print(optimizer.param_groups[0]['lr'])
    trainer.run(train_loader, max_epochs=5)

    # Evaluate on the held-out test split (was train_loader, which reports
    # metrics on data the model was trained on).
    evaluator.run(test_loader)
    print(evaluator.state.metrics)
