from dataset import cifar10, mini_imagenet
from svd import *
from resnet import *
from train_test import *
from utils import *
from copy import deepcopy

import os
import time
import datetime
import torch
import argparse
import wandb


def _log_and_print(epoch, loss, optimizer, accuracy, vloss, params):
    """Print one epoch's metrics and mirror them to wandb.

    ``accuracy`` is the (top1, top5) pair returned by ``test_iter``;
    ``params`` is the model size in millions of parameters.
    """
    print(f"#[Epoch {epoch}]"
          f" Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
          f", Valid Loss: {vloss:.3f}"
          f", Params: {params:.3f}M")

    # lr is read before the caller advances the scheduler, so this logs
    # the rate actually used for this epoch.
    wandb.log({
        "train_loss": loss,
        "lr": optimizer.param_groups[0]['lr'],
        "valid_top1_accuracy": accuracy[0],
        "valid_top5_accuracy": accuracy[1],
        "valid_loss": vloss,
        "params": params,
    })


def _save_best(model, model_root, model_path, best_path):
    """Save ``model``'s state dict to ``model_path``, deleting the
    previous best checkpoint ``best_path`` (if any).

    Deletion failures are reported but never fatal.
    """
    if best_path is not None:
        try:
            os.remove(best_path)
        except OSError:
            print(f"delete {best_path} failed")

    # exist_ok avoids the check-then-create race of the naive
    # ``if not exists: mkdir`` pattern.
    os.makedirs(model_root, exist_ok=True)
    torch.save(model.state_dict(), model_path)


def main(args):
    """Train an SVD-compressed ResNet with periodic singular-value
    pruning and layer-wise feature distillation, then optionally
    finetune the pruned model.

    Side effects: reads the dataset from ``args.dataset_path``, logs to
    wandb, and writes model/checkpoint files under ``args.saved_path``.
    """
    # The CIFAR-10 variants and the ImageNet-style variants are disjoint
    # architecture families; reject mismatched pairings early.
    if args.dataset == 'cifar10':
        assert args.arch in ["resnet20", "resnet32", "resnet44", "resnet56", "resnet110"]
    else:
        assert args.arch in ["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]

    batch_size = args.batch_size

    # Timestamped output locations so repeated runs never collide.
    run_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    checkpoint_root = args.saved_path + "/distl_checkpoints{}".format(run_stamp)
    model_root = args.saved_path + "/distl_{}_{}".format(args.arch, run_stamp)

    wandb.init(
        project = f"svd compress on {args.dataset} with layer-wise feature distillation",

        config = {
            "arch": args.arch,
            "dataset": args.dataset,
            "lr": args.lr,
            "epochs": args.epochs,
            "batch_size": args.batch_size,

            "prune_threshold": args.prune_threshold,
            "prune_interval": args.prune_interval,
            "lambd_inc_interval": args.lambd_inc_interval,
            "lambd": args.lambd,
            "lambd_inc": args.lambd_inc,
            "mu": args.mu,
            "svr_method": args.svr_method,

            "finetune": args.finetune,
            "finetune_epochs": args.finetune_epochs,
        }
    )

    # os.cpu_count() may return None; fall back to 0 workers (load in
    # the main process) in that case. Capped at 8.
    num_workers = min([os.cpu_count() or 0, batch_size if batch_size > 1 else 0, 8])

    # create dataset
    if args.dataset == 'cifar10':
        train_loader = cifar10(args.dataset_path, train=True,
                               batch_size=batch_size, num_workers=num_workers)
        valid_loader = cifar10(args.dataset_path, train=False,
                               batch_size=batch_size, num_workers=num_workers)
    elif args.dataset == 'mini-imagenet':
        train_loader = mini_imagenet(args.dataset_path, train=True,
                                     batch_size=batch_size, num_workers=num_workers)
        valid_loader = mini_imagenet(args.dataset_path, train=False,
                                     batch_size=batch_size, num_workers=num_workers)
    else:
        raise ValueError(f"Unknown dataset: {args.dataset}")

    # create model. NOTE: eval() is acceptable only because args.arch was
    # validated against the whitelists above; do not relax those asserts.
    svd_model = eval(args.arch)(Conv2d=SVD_Conv2d, Linear=SVD_Linear,
                                decomposition_mode=args.decomposition_mode)
    full_params = count_params(svd_model)

    # load pretrained weights: either a plain ('normal') state dict that
    # must be decomposed into the SVD model, or an already-SVD one.
    if args.pretrained:
        print(f"Using pretrained {args.pretrained_type} model...")

        if args.pretrained_type == 'normal':
            norm_model = eval(args.arch)()
            norm_model.load_state_dict(torch.load(args.pretrained))

            norm_model.cuda()
            accuracy = test_iter(norm_model, valid_loader, [1, 5])
            print(f"Before pre-pruning"
                f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                f", Params: {count_params(norm_model) / 1e6:.3f}M")

            svd_model.cuda()
            init_svd_model_from_normal(svd_model, norm_model)

        elif args.pretrained_type == 'svd':
            init_svd_model_from_state_dict(svd_model, torch.load(args.pretrained))
            svd_model.cuda()
            accuracy = test_iter(svd_model, valid_loader, [1, 5])
            print(f"Before pre-pruning"
                f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                f", Params: {count_params(svd_model) / 1e6:.3f}M")
        else:
            raise ValueError(f"Unknown pretrained_type: {args.pretrained_type}")
    else:
        print("From scratch...")

    model = svd_model

    def create_optimizer_and_lr_scheduler():
        """Build a fresh SGD optimizer + StepLR schedule for ``model``.

        With --lr-decay the base lr is scaled by the model's current
        parameter fraction, so smaller (pruned) models train gentler.
        """
        lr = args.lr
        if args.lr_decay:
            params = count_params(model)
            lr *= params / full_params

        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
        )

        # Halve the lr every 10 epochs (cosine annealing was tried and
        # abandoned — see git history).
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=10, gamma=0.5)
        return optimizer, lr_scheduler


    # create optimizer and lr_scheduler
    optimizer, lr_scheduler = create_optimizer_and_lr_scheduler()

    # resume from checkpoint if requested
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint)
        init_svd_model_from_state_dict(model, checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    # init training state. NOTE(review): args.lambd_inc and
    # args.lambd_inc_interval are logged to wandb but never applied here;
    # lambd stays constant for the whole run — confirm this is intended.
    start_time = time.time()
    lambd = args.lambd
    mu = args.mu
    prune_threshold = args.prune_threshold

    best_acc = 0
    best_path = None

    layer_groups = split_layers(model)
    # Frozen copy of the (pre-pruning) model used as distillation teacher.
    teacher = deepcopy(model)
    # Extra loss term for distillation; only the model argument is used.
    penalty = \
        lambda net, inputs, labels: svd_penalty(net, lambd, mu, args.svr_method)

    # training loop
    for epoch in range(args.start_epoch, args.epochs):

        # Periodically prune small singular values, recovering accuracy
        # group by group (last layers first) via feature distillation.
        if epoch % args.prune_interval == 0:

            for i, layers in reversed(list(enumerate(layer_groups))):
                print(f"Pruning {layers}")
                prune_model(model, prune_threshold, layers)

                print(f"Distillating {layers}")
                distillation(teacher, model, train_loader,
                             layer_groups, i, args.lr, penalty, epochs=5)

                print("Test...")
                accuracy = test_iter(model, valid_loader, [1, 5])
                print(f"After pruning and distillating {layers}"
                        f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}",
                        f", Params: {count_params(model) / 1e6:.3f}M")

            # The model just shrank: restart best-model tracking and
            # rebuild the optimizer/scheduler for the new parameter set.
            best_acc = 0
            best_path = None
            optimizer, lr_scheduler = create_optimizer_and_lr_scheduler()

        print("Train...")
        loss = svd_train_iter(model, train_loader, optimizer, epoch,
                   lambd=lambd, mu=mu, svr_method=args.svr_method)

        print("Test...")
        accuracy, vloss = test_iter(model, valid_loader, [1, 5], need_loss=True)
        params = count_params(model) / 1e6

        _log_and_print(epoch, loss, optimizer, accuracy, vloss, params)

        lr_scheduler.step()

        # keep only the single best model on disk
        if accuracy[0] > best_acc:
            best_acc = accuracy[0]
            model_path = model_root + \
                f"/{args.arch}_A{accuracy[0]:.3f}_P{params:.3f}M.pth"
            _save_best(model, model_root, model_path, best_path)
            best_path = model_path

        # always overwrite the rolling "last" checkpoint for resumption
        save_file = {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "lr_scheduler": lr_scheduler.state_dict(),
            "epoch": epoch,
            "args": args
        }

        os.makedirs(checkpoint_root, exist_ok=True)
        torch.save(save_file, os.path.join(checkpoint_root, f"{args.arch}_{args.dataset}_svd_last.pt"))

    if args.finetune:
        # fresh optimizer/scheduler and best tracking for the finetune phase
        optimizer, lr_scheduler = create_optimizer_and_lr_scheduler()

        best_acc = 0
        best_path = None

        for epoch in range(args.finetune_epochs):
            print("Finetune...")
            loss = normal_train_iter(model, train_loader, optimizer, epoch)

            print("Test...")
            accuracy, vloss = test_iter(model, valid_loader, [1, 5], need_loss=True)
            params = count_params(model) / 1e6

            _log_and_print(epoch, loss, optimizer, accuracy, vloss, params)

            lr_scheduler.step()

            # NOTE(review): the original code built a resume checkpoint
            # dict here but never wrote it; dropped as dead code. Add a
            # torch.save if resumable finetuning is wanted.

            if accuracy[0] > best_acc:
                best_acc = accuracy[0]
                model_path = model_root + \
                    f"/{args.arch}_finetune_A{accuracy[0]:.3f}_P{params:.3f}M.pth"
                _save_best(model, model_root, model_path, best_path)
                best_path = model_path

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("training time {}".format(total_time_str))

    wandb.finish()


def parse_args(argv=None):
    """Parse command-line options for SVD-compression training.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse; ``None`` (the default, and the previous
        behavior) parses ``sys.argv[1:]``. Passing an explicit list makes
        the parser usable from tests and notebooks.

    Returns
    -------
    argparse.Namespace
        The parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Model Training")

    # model / data
    parser.add_argument('--arch', default='resnet20', type=str, help='model architecture')
    parser.add_argument("--dataset", default="cifar10", type=str, help="dataset name")
    parser.add_argument("--dataset-path", default="./data", type=str, help="dataset path")

    # SVD compression hyper-parameters
    parser.add_argument('--decomposition-mode', default='channel', type=str, help='channel or spatial')
    parser.add_argument('--prune-threshold', default=0.01, type=float, help='threshold of pruning')
    parser.add_argument('--lambd', default=0.0001, type=float, help='coefficient of orthogonality penalty')
    parser.add_argument('--mu', default=0.001, type=float, help='coefficient of singular value regularization')
    parser.add_argument('--lambd-inc', default=10, type=float, help='multiplier of orthogonality penalty')
    parser.add_argument('--svr-method', default='l1', type=str, help='method of singular value regularization')

    parser.add_argument('--lambd-inc-interval', default=10000, type=int, help='interval of increasing lambda')
    parser.add_argument('--prune-interval', default=20, type=int, help='interval of pruning')
    parser.add_argument('--lr-decay', action='store_true', help='whether to decay learning rate')

    # optimization
    parser.add_argument("-b", "--batch-size", default=128, type=int, help="batch size")
    parser.add_argument("--epochs", default=300, type=int, help="number of total epochs to train")
    parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')

    # finetuning phase
    parser.add_argument('--finetune', action='store_true', help='whether to finetune the original model')
    parser.add_argument('--finetune-epochs', default=30, type=int, help='number of epochs to finetune')

    # logging / resumption
    parser.add_argument('--print-freq', default=1000, type=int, help='print frequency')
    parser.add_argument('--checkpoint', help='resume from checkpoint')
    parser.add_argument('--pretrained', help='pretrained model')
    parser.add_argument('--pretrained-type', default='normal', type=str, help='normal or svd')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='start epoch')

    parser.add_argument('--saved-path', default='./saved', type=str, help='path to save model')
    parser.add_argument('--log-path', default='./log', type=str, help='path to save log')

    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_args()
    # makedirs with exist_ok avoids the check-then-create race of the
    # previous ``if not exists: mkdir`` and also creates intermediate
    # directories when a nested path is given.
    os.makedirs(args.saved_path, exist_ok=True)
    os.makedirs(args.log_path, exist_ok=True)
    main(args)
