import os
import time
import json
import argparse

import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

from src.loaders import create_loader
from src.config import config
from src.models import create_classification_net
from src.lr_adjuster import CosineAnnealing, LinearDecay
from util.average_meter import AverageMeter


def parse_args(argv=None):
    """Parse training hyper-parameters from the command line.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the original behavior.

    Returns:
        argparse.Namespace with data/arch names, learning-rate schedule
        bounds, and epoch counts.
    """
    # The original passed "Train AlexNet" positionally, which sets `prog`
    # (the program name shown in usage), not the help description.
    parser = argparse.ArgumentParser(description="Train AlexNet")
    parser.add_argument("--data_name", type=str, default="mnist")
    parser.add_argument("--arch_name", type=str, default="simplenet")
    parser.add_argument("--lr_init", type=float, default=1e-2)
    parser.add_argument("--lr_max", type=float, default=1e-2)
    parser.add_argument("--lr_min", type=float, default=1e-5)
    parser.add_argument("--resume_epoch", type=int, default=0)
    parser.add_argument("--warmup_epochs", type=int, default=0)
    parser.add_argument("--total_epochs", type=int, default=20)
    return parser.parse_args(argv)


def update_config():
    """Copy the parsed command-line arguments onto the global config object."""
    args = parse_args()
    # Every CLI flag maps one-to-one onto a config attribute of the same name.
    for field in ("data_name", "arch_name", "lr_init", "lr_max", "lr_min",
                  "resume_epoch", "warmup_epochs", "total_epochs"):
        setattr(config, field, getattr(args, field))


def accuracy(output, target, topk=(1,)):
    """Compute the precision@k (as a percentage) for each k in ``topk``.

    Args:
        output: logits/scores of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, one precision value per requested k.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)

        # (batch, k_max) indices of the highest-scoring classes, transposed
        # to (k_max, batch) so hits[:k] selects the top-k rows per sample.
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n_samples)
            for k in topk
        ]


def train_epoch(net, loader, loss_fn, optimizer, device, cur_epoch,
                total_epochs, steps_per_epoch, lr_adjuster, writer=None):
    """Run one training epoch and return the epoch's average loss.

    The learning rate is adjusted per-iteration via ``lr_adjuster``; a
    progress line is printed once, on the last step of the epoch.
    """
    step_timer = AverageMeter()
    loss_meter = AverageMeter()
    net.train()
    tick = time.time()
    for step, (feature, target) in enumerate(loader):
        # Global iteration index drives the per-step LR schedule.
        global_step = cur_epoch * steps_per_epoch + step
        lr = lr_adjuster(optimizer, global_step)

        feature, target = feature.to(device), target.to(device)
        loss = loss_fn(net(feature), target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_meter.update(loss.item(), feature.size(0))
        step_timer.update(time.time() - tick)

        # Summarize the whole epoch on its final step.
        if step + 1 == steps_per_epoch:
            print(
                "epoch: [{:3d}:{:3d}], epoch time: {:5.3f}, steps: {:5d}, " \
                "per step time: {:5.3f}, avg loss: {:5.3f}, lr: {:5.3}".format(
                    cur_epoch + 1, total_epochs, step_timer.sum, step_timer.count,
                    step_timer.avg, loss_meter.avg, lr),
                flush=True)
        tick = time.time()

    if writer:
        writer.add_scalar("train_loss", loss_meter.avg, cur_epoch+1)

    return loss_meter.avg


def infer(net, loader, loss_fn, device, cur_epoch, writer=None):
    """Evaluate ``net`` on ``loader`` and return the top-1 accuracy.

    Runs in eval mode with gradients disabled, prints a summary line,
    and (optionally) logs loss/top-1/top-5 to TensorBoard.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    net.eval()
    with torch.no_grad():
        end = time.time()
        for i, (feature, target) in enumerate(loader):
            feature = feature.to(device)
            target = target.to(device)
            output = net(feature)
            loss = loss_fn(output, target)

            losses.update(loss.item(), feature.size(0))
            prec1, prec5 = accuracy(output, target, (1, 5))
            top1.update(prec1[0].item(), feature.size(0))
            top5.update(prec5[0].item(), feature.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
        # show inference message
        msg = 'Test: Time {batch_time.avg:.3f}\t' \
              'Loss {loss.avg:.4f}\t' \
              'Error@1 {error1:.3f}\t' \
              'Error@5 {error5:.3f}\t' \
              'Accuracy@1 {top1.avg:.3f}\t' \
              'Accuracy@5 {top5.avg:.3f}\t'.format(
            batch_time=batch_time, loss=losses, top1=top1, top5=top5,
            error1=100 - top1.avg, error5=100 - top5.avg)
        print(msg, flush=True)
        if writer:
            # Log at cur_epoch + 1 so the eval curves line up with
            # train_epoch, which records train_loss at cur_epoch + 1
            # (previously these were offset by one epoch).
            writer.add_scalar("infer_loss", losses.avg, cur_epoch + 1)
            writer.add_scalar("top1_acc", top1.avg, cur_epoch + 1)
            writer.add_scalar("top5_acc", top5.avg, cur_epoch + 1)

        return top1.avg


def main():
    """Task main function: configure, train, checkpoint, and evaluate."""
    # args
    update_config()

    # tensorboard
    writer = SummaryWriter(config.log_path)

    # environment setting
    # NOTE(review): benchmark=True (auto-tune fastest algorithms) and
    # deterministic=True (restrict to reproducible algorithms) pull in
    # opposite directions — confirm which property is actually wanted.
    cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Is cuda available?", torch.cuda.is_available())

    # dataset
    train_loader = create_loader(config.data_name, True)
    infer_loader = create_loader(config.data_name, False)

    # net
    net = create_classification_net(config.arch_name, num_classes=10).to(device)
    loss_fn = torch.nn.CrossEntropyLoss().to(device)

    # optimizer
    optimizer = optim.SGD(
        net.parameters(),
        lr=config.lr_init,
        momentum=config.momentum,
        weight_decay=config.weight_decay,
        nesterov=config.nesterov
    )

    # output setting
    steps_per_epoch = len(train_loader)

    # lr adjuster (CosineAnnealing is the imported alternative schedule;
    # swap it in here if cosine decay is preferred over linear)
    adjuster = LinearDecay(lr_min=config.lr_min,
                           lr_max=config.lr_max,
                           total_iters=config.total_epochs * steps_per_epoch,
                           warmup_iters=config.warmup_epochs * steps_per_epoch,
                           lr_init=config.lr_init)

    # result recorder
    loss_each_epoch = list()
    top1_each_epoch = list()

    for cur_epoch in range(config.total_epochs):
        # train
        loss = train_epoch(net, train_loader, loss_fn, optimizer, device, cur_epoch,
                           config.total_epochs, steps_per_epoch, adjuster, writer)
        loss_each_epoch.append(loss)

        # save model checkpoint for this epoch
        model_path = os.path.join(config.output_path, f"{config.arch_name}_{config.data_name}_{cur_epoch}.pth")
        torch.save(net.state_dict(), model_path)

        # save a JSON note pairing the checkpoint with its epoch and loss
        note_path = os.path.join(config.output_path, f"{config.arch_name}_{config.data_name}_{cur_epoch}.json")
        note = {
            "epoch": cur_epoch+1,
            "loss": loss,
            "checkpoint_path": model_path
        }
        with open(note_path, "w") as f:
            json.dump(note, f)

        # infer
        top1 = infer(net, infer_loader, loss_fn, device, cur_epoch, writer)
        top1_each_epoch.append(top1)

    writer.close()


if __name__ == "__main__":
    main()
