from dataset import cifar10, mini_imagenet
from resnet import *
from train_test import *
from utils import count_params
from copy import deepcopy

import os
import time
import datetime
import torch
import argparse
import wandb


def main(args):
    """Train a ResNet on CIFAR-10 or mini-ImageNet.

    Builds the data loaders, model, SGD optimizer and step-decay LR
    schedule, then runs the train/eval loop: every epoch is logged to
    wandb and a plain-text results file, checkpoints are written under
    a per-run directory, and (unless --save-every) the best top-1 model
    weights are kept separately.

    Args:
        args: parsed CLI namespace from ``parse_args()``.

    Raises:
        ValueError: if the arch/dataset pairing is invalid or the
            dataset name is unknown.
    """
    # Validate the arch/dataset pairing early. Raise instead of assert so
    # the check survives `python -O`.
    if args.dataset == 'cifar10':
        cifar_archs = ["resnet20", "resnet32", "resnet44", "resnet56", "resnet110"]
        if args.arch not in cifar_archs:
            raise ValueError(f"arch {args.arch!r} is not a cifar10 arch {cifar_archs}")
    else:
        imagenet_archs = ["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]
        if args.arch not in imagenet_archs:
            raise ValueError(f"arch {args.arch!r} is not an imagenet arch {imagenet_archs}")

    batch_size = args.batch_size

    # One timestamp tags every artifact of this run: results file,
    # checkpoint directory and best-model path.
    run_tag = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    results_file = os.path.join(args.log_path, f"results{run_tag}.txt")
    checkpoint_root = os.path.join(args.saved_path, f"checkpoints{run_tag}")
    model_path = os.path.join(args.saved_path, f"{args.arch}_{args.dataset}_{run_tag}.pth")

    wandb.init(
        project=f"train on {args.dataset}",
        config={
            "arch": args.arch,
            "dataset": args.dataset,
            "lr": args.lr,
            "epochs": args.epochs,
            "batch_size": args.batch_size,
        },
    )

    # Cap loader workers at the CPU count, the batch size and a hard limit of 8.
    num_workers = min(os.cpu_count(), batch_size if batch_size > 1 else 0, 8)

    # create dataset
    if args.dataset == 'cifar10':
        train_loader = cifar10(args.dataset_path, train=True,
                               batch_size=batch_size, num_workers=num_workers)
        valid_loader = cifar10(args.dataset_path, train=False,
                               batch_size=batch_size, num_workers=num_workers)
    elif args.dataset == 'mini-imagenet':
        train_loader = mini_imagenet(args.dataset_path, train=True,
                                     batch_size=batch_size, num_workers=num_workers)
        valid_loader = mini_imagenet(args.dataset_path, train=False,
                                     batch_size=batch_size, num_workers=num_workers)
    else:
        raise ValueError(f"Unknown dataset: {args.dataset}")

    # Create the model by looking the constructor up by name instead of
    # eval() — equivalent for the already-validated arch names, without
    # executing arbitrary strings.
    model = globals()[args.arch]()

    # Optionally warm-start from pretrained weights. map_location='cpu'
    # (matching the checkpoint load below) so a GPU-saved state dict loads
    # on any machine; the model is moved to cuda right after.
    if args.pretrained:
        print("Using pretrained model...")
        model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
    else:
        print("From scratch...")

    # to cuda
    model.cuda()

    # SGD with step decay at epochs 100/150; last_epoch honors a manually
    # supplied --start-epoch.
    optimizer = torch.optim.SGD(
        model.parameters(), args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[100, 150], last_epoch=args.start_epoch - 1)

    # For deep cifar10 models, use warm-up: the original paper uses lr=0.01
    # for the first 400 minibatches of resnet1202 before switching back,
    # which corresponds to the first epoch in this setup.
    if args.arch in ['resnet1202', 'resnet110']:
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * 0.1

    # Resume: restore model/optimizer/scheduler state and the epoch counter.
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    # init arguments
    start_time = time.time()
    best_acc = 0
    # The parameter count never changes across epochs — compute it once.
    params = count_params(model) / 1e6

    # The checkpoint directory is fixed for the whole run; create it up
    # front. exist_ok avoids the exists-then-mkdir race of the per-epoch
    # check it replaces.
    os.makedirs(checkpoint_root, exist_ok=True)

    # training loop
    for epoch in range(args.start_epoch, args.epochs):
        print("Train...")
        loss = train_iter(model, train_loader, optimizer, epoch)

        # Record the lr actually used this epoch, before the scheduler steps.
        lr = optimizer.param_groups[0]['lr']
        # resume lr for deep cifar10 models after warm-up
        if epoch == 0 and args.arch in ['resnet1202', 'resnet110']:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr
        lr_scheduler.step()

        print("Test...")
        accuracy, vloss = test_iter(model, valid_loader, [1, 5], need_loss=True)

        print(f"#[Epoch {epoch}]"
              f" LR: {lr:.6f}"
              f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
              f", Valid Loss: {vloss:.3f}"
              f", Params: {params:.3f}M")

        wandb.log({
            "train_loss": loss,
            "lr": lr,
            "valid_top1_accuracy": accuracy[0],
            "valid_top5_accuracy": accuracy[1],
            "valid_loss": vloss,
            "params": params,
        })

        # Append this epoch's stats to the plain-text results log.
        with open(results_file, "a") as f:
            train_info = f"[epoch: {epoch}]\n" \
                         f"train_loss: {loss:.4f}\n" \
                         f"lr: {lr:.6f}\n"
            f.write(train_info + "\n")

        save_file = {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "lr_scheduler": lr_scheduler.state_dict(),
            "epoch": epoch,
            "args": args
        }

        if args.save_every:
            # Keep one checkpoint per epoch.
            torch.save(save_file, os.path.join(
                checkpoint_root, f"{args.arch}_{args.dataset}_{epoch}.pt"))
        else:
            # Keep only the latest checkpoint...
            torch.save(save_file, os.path.join(
                checkpoint_root, f"{args.arch}_{args.dataset}_last.pt"))
            # ...plus the best-top1 weights seen so far.
            if accuracy[0] > best_acc:
                best_acc = accuracy[0]
                torch.save(model.state_dict(), model_path)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("training time {}".format(total_time_str))
    wandb.finish()


def parse_args(argv=None):
    """Build and parse the training CLI.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (the previous hard-wired behavior), but accepting an explicit
            list makes the parser usable programmatically and testable.

    Returns:
        argparse.Namespace with the training configuration.
    """
    parser = argparse.ArgumentParser(description="Model Training")

    # model / data
    parser.add_argument('--arch', default='resnet20', type=str, help='model architecture')
    parser.add_argument("--dataset", default="cifar10", type=str, help="dataset name")
    parser.add_argument("--dataset-path", default="./data", type=str, help="dataset path")

    # optimization
    parser.add_argument("-b", "--batch-size", default=128, type=int, help="batch size")
    parser.add_argument("--epochs", default=200, type=int, help="number of total epochs to train")
    parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')

    # resume / warm-start
    parser.add_argument('--print-freq', default=1000, type=int, help='print frequency')
    parser.add_argument('--checkpoint', help='resume from checkpoint')
    parser.add_argument('--pretrained', help='pretrained model')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='start epoch')

    # output locations
    parser.add_argument('--saved-path', default='./saved', type=str, help='path to save model')
    parser.add_argument('--log-path', default='./log', type=str, help='path to save log')
    parser.add_argument('--save-every', action='store_true',
                        help='save every checkpoint (default: save best model and last checkpoint)')

    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_args()
    # makedirs with exist_ok replaces the racy exists-then-mkdir pattern
    # and, unlike os.mkdir, also creates missing parent directories.
    os.makedirs(args.saved_path, exist_ok=True)
    os.makedirs(args.log_path, exist_ok=True)
    main(args)
