# -*- coding: utf-8 -*-

import time
from pathlib import Path

import torch
import torch.nn as nn

from config.defaults import CONFIG as cfg
from data.build_data_loader import build_data_loader
from models import resnet20
from utils import print_train_log


def validate(model, test_data_loader, device):
    """Evaluate *model* on *test_data_loader* and return top-1 accuracy in [0, 1]."""
    model.eval()
    sample_count = 0
    hit_count = 0
    with torch.no_grad():
        for inputs, targets in test_data_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            logits = model(inputs)
            predictions = logits.max(1)[1]  # index of the max logit = predicted class
            sample_count += targets.size(0)
            hit_count += (predictions == targets).sum().item()
    return hit_count / sample_count


def train():
    """Train ResNet-20 according to the YAML config, validating after every
    epoch and checkpointing whenever the test accuracy improves.

    Reads all hyper-parameters (device, lr, weight decay, milestones, epoch
    range) from ``cfg`` after merging ``../config/resnet.yaml``.
    """
    custom_config_file_path = "../config/resnet.yaml"
    cfg.merge_from_file(custom_config_file_path)

    device = cfg.DEVICE

    # The loader builder reads cfg.PHASE, so toggle it to build both splits.
    cfg.PHASE = "test"
    test_data_loader = build_data_loader(cfg)
    cfg.PHASE = "train"
    train_data_loader = build_data_loader(cfg)

    model = resnet20()
    model = model.to(device)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cfg.OPTIMIZER.LEARNING_RATE,
                                momentum=0.9,
                                weight_decay=cfg.OPTIMIZER.WEIGHT_DECAY)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                        milestones=cfg.OPTIMIZER.MILESTONES)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(device)

    start_epoch, epochs = cfg.TRAIN.START_EPOCH, cfg.TRAIN.EPOCHS
    train_logger = print_train_log()
    best_test_accuracy = 0.0
    for epoch in range(start_epoch, epochs + 1):
        model.train()  # re-enable train mode after validate() set eval mode
        epoch_time = 0.0
        for step, (image, target) in enumerate(train_data_loader, 1):
            batch_start = time.time()

            optimizer.zero_grad()
            image, target = image.to(device), target.to(device)
            predict = model(image)
            loss = criterion(predict, target)
            loss.backward()
            optimizer.step()

            batch_end = time.time()
            # NOTE: this accumulates compute time only; data-loading time
            # between iterations is not included.
            epoch_time += (batch_end - batch_start)

            if step % 100 == 0:
                lr = optimizer.param_groups[0]["lr"]
                _, predicted = predict.max(1)
                batch_correct = predicted.eq(target).sum().item()
                batch_accuracy = batch_correct / target.size(0)
                train_logger(epoch_index=epoch,
                             batch_index=step,
                             learning_rate=lr,
                             batch_time=batch_end - batch_start,
                             batch_loss=loss.item(),
                             batch_accuracy=batch_accuracy)

        # FIX: step the scheduler once per epoch AFTER the optimizer steps.
        # The previous `lr_scheduler.step(epoch)` call (a) used the deprecated
        # epoch argument and (b) ran before any optimizer.step(), which skews
        # the schedule and triggers a PyTorch warning.
        lr_scheduler.step()

        current_test_accuracy = validate(model, test_data_loader, device)
        print(F"epoch: {epoch}, time: {epoch_time:.2f}, test accuracy: {current_test_accuracy:.2%}")

        if current_test_accuracy > best_test_accuracy:
            best_test_accuracy = current_test_accuracy
            save_data = {"model": model.state_dict(),
                         "optimizer": optimizer.state_dict(),
                         "scheduler": lr_scheduler.state_dict(),
                         "epoch": epoch,
                         "accuracy": best_test_accuracy}
            # FIX: create the checkpoint directory up front; torch.save raises
            # FileNotFoundError if it does not exist on a fresh clone.
            save_dir = Path("../trained_models")
            save_dir.mkdir(parents=True, exist_ok=True)
            torch.save(save_data, save_dir / F"{cfg.MODEL.NAME}.pth")


def main():
    """Print the best epoch and accuracy recorded in the saved checkpoint.

    Uncomment ``train()`` to run training first.
    """
    # train()
    # FIX: map_location="cpu" lets a checkpoint saved on GPU load on a
    # CPU-only machine; without it torch.load raises a RuntimeError there.
    saved_model = torch.load("../trained_models/resnet.pth", map_location="cpu")
    print(F"best epoch/accuracy: {saved_model['epoch']}/{saved_model['accuracy']:.2%}")


# Script entry point: reports on the saved checkpoint (training is toggled
# inside main()).
if __name__ == "__main__":
    main()
