from dataset import cifar10, mini_imagenet
from svd import *
from resnet import *
from train_test import *
from utils import count_params
from copy import deepcopy

import os
import time
import datetime
import torch
import argparse
import wandb


def main(args):
    """Train, prune, and optionally finetune an SVD-factorized ResNet.

    The training loop alternates plain epochs (with an SVD orthogonality /
    singular-value penalty and optional distillation) and periodic pruning
    until the target compression ratio ``args.cr`` is reached, then
    optionally finetunes the compressed model.  Metrics are logged to wandb;
    the best weights and a rolling checkpoint are written under
    ``args.saved_path``.

    Args:
        args: namespace produced by :func:`parse_args`.
    """
    # Each dataset supports a different ResNet family.
    if args.dataset == 'cifar10':
        assert args.arch in ["resnet20", "resnet32", "resnet44", "resnet56", "resnet110"]
    else:
        assert args.arch in ["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]

    batch_size = args.batch_size

    # Timestamped output directories so concurrent runs do not collide.
    start_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    checkpoint_root = args.saved_path + "/svd_checkpoints{}".format(start_time)
    model_root = args.saved_path + "/svd_{}_{}".format(args.arch, start_time)

    wandb.init(
        project = f"svd compress on {args.dataset}",
        
        config = {
            "arch": args.arch,
            "dataset": args.dataset,
            "lr": args.lr,
            "epochs": args.epochs,
            "batch_size": args.batch_size,

            "pre_prune": args.pre_prune,
            "cr": args.cr,
            "prune_threshold": args.prune_threshold,
            "use_percent": args.use_percent,
            "global_prune": args.global_prune,
            "score": args.score,
            "prune_interval": args.prune_interval,

            "lambd_inc_interval": args.lambd_inc_interval,
            "lambd": args.lambd,
            "lambd_inc": args.lambd_inc,
            "mu": args.mu,
            "svr_method": args.svr_method,

            "loss_coef": args.loss_coef,
            "distillation": args.distillation,

            "finetune": args.finetune,
            "finetune_epochs": args.finetune_epochs,
            "finetune_kd": args.finetune_kd,
        }
    )

    # Cap workers at 8; single-sample batches get 0 workers (main process only).
    num_workers = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])

    # create dataset
    if args.dataset == 'cifar10':
        train_loader = cifar10(args.dataset_path, train=True,
                               batch_size=batch_size, num_workers=num_workers)
        valid_loader = cifar10(args.dataset_path, train=False,
                               batch_size=batch_size, num_workers=num_workers)
    elif args.dataset == 'mini-imagenet':
        train_loader = mini_imagenet(args.dataset_path, train=True, 
                                     batch_size=batch_size, num_workers=num_workers)
        valid_loader = mini_imagenet(args.dataset_path, train=False,
                                     batch_size=batch_size, num_workers=num_workers)
    else:
        raise ValueError(f"Unknown dataset: {args.dataset}")
    
    # create model
    # NOTE: eval() is acceptable here only because args.arch was asserted
    # against a fixed whitelist above; never relax that assert.
    svd_model = eval(args.arch)(Conv2d=SVD_Conv2d, # Linear=SVD_Linear,
                                decomposition_mode=args.decomposition_mode)
    full_params = count_params(svd_model)
    origin_params = count_params(eval(args.arch)())

    # load pretrained model
    if args.pretrained:
        print(f"Using pretrained {args.pretrained_type} model...")

        if args.pretrained_type == 'normal':
            # Plain (unfactorized) weights: evaluate, keep as distillation
            # teacher, then initialize the SVD model from them.
            norm_model = eval(args.arch)()
            norm_model.load_state_dict(torch.load(args.pretrained))

            norm_model.cuda()
            accuracy = test_iter(norm_model, valid_loader, [1, 5])
            print(f"Before pre-pruning"
                f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                f", Params: {count_params(norm_model) / 1e6:.3f}M")
            teacher = norm_model

            svd_model.cuda()
            init_svd_model_from_normal(svd_model, norm_model)

        elif args.pretrained_type == 'svd':
            # Already-factorized weights: a deep copy serves as the teacher.
            init_svd_model_from_state_dict(svd_model, torch.load(args.pretrained))
            svd_model.cuda()
            accuracy = test_iter(svd_model, valid_loader, [1, 5])
            print(f"Before pre-pruning"
                f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                f", Params: {count_params(svd_model) / 1e6:.3f}M")
            teacher = deepcopy(svd_model)
        else:
            raise ValueError(f"Unknown pretrained_type: {args.pretrained_type}")
        
        if args.pre_prune:
            # pre-prune the model
            prune_model(svd_model, args.prune_threshold,
                        percent=args.use_percent, global_prune=args.global_prune,
                        score=args.score, score_kwargs={'train_loader': train_loader},
                        target_params=origin_params * (1 - args.cr))

            # test model accuracy before training
            accuracy = test_iter(svd_model, valid_loader, [1, 5])
            print(f"After pre-pruning (before training)"
                f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                f", Params: {count_params(svd_model) / 1e6:.3f}M")
    else:
        print("From scratch...")
        teacher = None

    model = svd_model
    # Freeze the teacher so distillation targets stay fixed.
    # BUGFIX: `teacher` is None when training from scratch; the previous
    # unguarded loop crashed with AttributeError on every from-scratch run.
    if teacher is not None:
        for p in teacher.parameters():
            p.requires_grad = False

    def create_optimizer_and_lr_scheduler():
        # Optionally scale the LR by the ratio of remaining parameters, so
        # smaller (more pruned) models train with a proportionally lower LR.
        lr = args.lr
        if args.lr_decay:
            params = count_params(model)
            lr *= params / full_params

        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
        )

        # Halve the LR every 10 epochs.
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=10, gamma=0.5)
        return optimizer, lr_scheduler
    

    # create optimizer and lr_scheduler
    optimizer, lr_scheduler = create_optimizer_and_lr_scheduler()
    
    # load checkpoint
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint)
        init_svd_model_from_state_dict(model, checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    # init arguments
    start_time = time.time()
    lambd = args.lambd
    mu = args.mu

    def penalty(model, input, output, label):
        # SVD regularization plus optional MSE distillation against the
        # frozen teacher.  `lambd` is read live from the enclosing scope, so
        # the periodic lambda increase below takes effect immediately.
        loss = svd_penalty(model, lambd, mu, args.svr_method)
        if args.distillation > 0 and teacher is not None:
            teacher_output = teacher(input)
            loss += F.mse_loss(output, teacher_output) * args.distillation
        return loss

    best_acc = 0
    best_path = None

    # training loop
    for epoch in range(args.start_epoch, args.epochs):
        print("Train...")
        loss = train_iter(model, train_loader, optimizer, epoch, 
                          args.loss_coef, penalty)
        
        print("Test...")
        accuracy, vloss = test_iter(model, valid_loader, [1, 5], need_loss=True)
        params = count_params(model) / 1e6

        print(f"#[Epoch {epoch}]"
              f" Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
              f", Valid Loss: {vloss:.3f}"
              f", Params: {params:.3f}M")

        wandb.log({
            "train_loss": loss,
            "lr": optimizer.param_groups[0]['lr'],
            "valid_top1_accuracy": accuracy[0],
            "valid_top5_accuracy": accuracy[1],
            "valid_loss": vloss,
            "params": params,
        })

        lr_scheduler.step()

        # Keep only the single best checkpoint by top-1 accuracy.
        if accuracy[0] > best_acc:
            best_acc = accuracy[0]
            model_path = model_root + \
                f"/{args.arch}_A{accuracy[0]:.3f}_P{params:.3f}M.pth"
            
            # delete previous best model (best-effort; never abort training)
            if best_path is not None:
                try:
                    os.remove(best_path)
                except OSError:
                    print(f"delete {best_path} failed")

            # makedirs(exist_ok=True) avoids the check-then-create race and
            # also handles a nested saved_path.
            os.makedirs(model_root, exist_ok=True)
            torch.save(model.state_dict(), model_path)
            best_path = model_path

        # increase penalty coefficient
        if (epoch + 1) % args.lambd_inc_interval == 0:
            print("Increase lambda...")
            lambd *= args.lambd_inc
        
        # do model pruning
        if (epoch + 1) % args.prune_interval == 0:
            print("Pruning model...")
            prune_model(model, args.prune_threshold,
                        percent=args.use_percent, global_prune=args.global_prune,
                        score=args.score, score_kwargs={'train_loader': train_loader},
                        target_params=origin_params * (1 - args.cr))

            accuracy = test_iter(model, valid_loader, [1, 5])
            params = count_params(model)
            print(f"After pruning"
                  f", Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                  f", Params: {params / 1e6:.3f}M")
            
            if params / origin_params <= 1 - args.cr:
                print("Model compression ratio reached, stop compression...")
                break
            
            # reset penalty coefficient
            lambd = args.lambd

            # reset best_acc and best_path
            best_acc = 0
            best_path = None

            # reset optimizer and lr_scheduler (model size changed, so the
            # optimizer's parameter references are stale)
            optimizer, lr_scheduler = create_optimizer_and_lr_scheduler()

        # Always save a rolling "last" checkpoint for resumption.
        save_file = {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "lr_scheduler": lr_scheduler.state_dict(),
            "epoch": epoch,
            "args": args
        }

        os.makedirs(checkpoint_root, exist_ok=True)
        torch.save(save_file, os.path.join(checkpoint_root, f"{args.arch}_{args.dataset}_svd_last.pt"))
    
    if args.finetune:
        # recreate optimizer and lr_scheduler
        optimizer, lr_scheduler = create_optimizer_and_lr_scheduler()

        best_acc = 0
        best_path = None

        def penalty(model, input, output, label):
            # Finetuning drops the SVD penalty; only (optional) distillation
            # remains.  Start from a scalar zero tensor on the GPU.
            loss = torch.Tensor([0])[0].cuda()
            if args.distillation > 0 and teacher is not None:
                teacher_output = teacher(input)
                loss += F.mse_loss(output, teacher_output) * args.distillation
            return loss
        
        _penalty = penalty if args.finetune_kd else None
        loss_coef = args.loss_coef if args.finetune_kd else 1.
        
        # finetune
        for epoch in range(args.finetune_epochs):
            print("Finetune...")
            loss = train_iter(model, train_loader, optimizer, epoch, loss_coef, _penalty)
            
            print("Test...")
            accuracy, vloss = test_iter(model, valid_loader, [1, 5], need_loss=True)
            params = count_params(model) / 1e6

            print(f"#[Epoch {epoch}]"
                f" Top1 Accuracy: {accuracy[0]:.3f}, Top5 Accuracy: {accuracy[1]:.3f}"
                f", Valid Loss: {vloss:.3f}"
                f", Params: {params:.3f}M")

            wandb.log({
                "train_loss": loss,
                "lr": optimizer.param_groups[0]['lr'],
                "valid_top1_accuracy": accuracy[0],
                "valid_top5_accuracy": accuracy[1],
                "valid_loss": vloss,
                "params": params,
            })

            lr_scheduler.step()

            save_file = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch,
                "args": args
            }

            if accuracy[0] > best_acc:
                best_acc = accuracy[0]
                model_path = model_root + \
                    f"/{args.arch}_finetune_A{accuracy[0]:.3f}_P{params:.3f}M.pth"
                
                # delete previous best model (best-effort; never abort training)
                if best_path is not None:
                    try:
                        os.remove(best_path)
                    except OSError:
                        print(f"delete {best_path} failed")

                os.makedirs(model_root, exist_ok=True)
                torch.save(model.state_dict(), model_path)
                best_path = model_path

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("training time {}".format(total_time_str))

    wandb.finish()


def parse_args():
    """Build the CLI for SVD-compression training and parse ``sys.argv``.

    Returns:
        argparse.Namespace with all training, pruning, regularization,
        finetuning, and path options.
    """
    parser = argparse.ArgumentParser(description="Model Training")
    add = parser.add_argument

    # --- model & data -----------------------------------------------------
    add('--arch', default='resnet20', type=str, help='model architecture')
    add("--dataset", default="cifar10", type=str, help="dataset name")
    add("--dataset-path", default="./data", type=str, help="dataset path")

    # --- decomposition & pruning ------------------------------------------
    add('--decomposition-mode', default='channel', type=str, help='channel or spatial')
    add('--prune-threshold', default=0.01, type=float, help='threshold of pruning')
    add('--use-percent', default=False, action='store_true', help='whether to use percent of pruning')
    add('--global-prune', default=False, action='store_true', help='whether to prune globally')
    add('--cr', default=0.6, type=float, help='compression ratio')
    add('--score', default='l1', type=str, help='score function')

    # --- SVD regularization -----------------------------------------------
    add('--lambd', default=0.0001, type=float, help='coefficient of orthogonality penalty')
    add('--mu', default=0.001, type=float, help='coefficient of sigular value regularization')
    add('--lambd-inc', default=10, type=float, help='multiplier of orthogonality penalty')
    add('--svr-method', default='l1', type=str, help='method of sigular value regularization')
    add('--pre-prune', action='store_true', help='whether to prune model before training')

    # --- loss shaping ------------------------------------------------------
    add('--loss-coef', default=1, type=float, help='coefficient of loss')
    add('--distillation', default=0, type=float, help='coefficient of distillation loss')

    # --- schedules ----------------------------------------------------------
    add('--lambd-inc-interval', default=5, type=int, help='interval of increasing lambda')
    add('--prune-interval', default=30, type=int, help='interval of pruning')
    add('--lr-decay', action='store_true', help='whether to decay learning rate')

    # --- optimization -------------------------------------------------------
    add("-b", "--batch-size", default=128, type=int, help="batch size")
    add("--epochs", default=300, type=int, help="number of total epochs to train")
    add('--lr', default=0.01, type=float, help='initial learning rate')
    add('--momentum', default=0.9, type=float, help='momentum')
    add('--wd', '--weight-decay', default=1e-4, type=float,
        metavar='W', help='weight decay (default: 1e-4)',
        dest='weight_decay')

    # --- finetuning ---------------------------------------------------------
    add('--finetune', action='store_true', help='whether to finetune the original model')
    add('--finetune-epochs', default=30, type=int, help='number of epochs to finetune')
    add('--finetune-kd', action='store_true',
        help='whether to use knowledge distillation in finetuning')

    # --- misc / resume ------------------------------------------------------
    add('--print-freq', default=1000, type=int, help='print frequency')
    add('--checkpoint', help='resume from checkpoint')
    add('--pretrained', help='pretrained model')
    add('--pretrained-type', default='normal', type=str, help='normal or svd')
    add('--start-epoch', default=0, type=int, metavar='N',
        help='start epoch')

    add('--saved-path', default='./saved', type=str, help='path to save model')
    add('--log-path', default='./log', type=str, help='path to save log')

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    # makedirs(exist_ok=True) replaces the exists()+mkdir() pair: it is free
    # of the check-then-create race and also creates missing parent
    # directories when the configured path is nested.
    os.makedirs(args.saved_path, exist_ok=True)
    os.makedirs(args.log_path, exist_ok=True)
    main(args)
