import os
import pathlib
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.tensorboard import SummaryWriter
import data_creator
import trainers
import utils
from args import args


def main():
    """Entry point: seed RNGs, allocate a unique run directory, and dispatch
    to the trainer named by ``args.trainer``.

    Side effects: mutates ``args.exp_name`` and ``args.run_base_dir``, writes
    ``settings.txt`` into the run directory, and attaches a file handler to
    the module-level ``logger`` (not defined in this file -- presumably set
    up by a sibling module; TODO confirm).

    Raises:
        RuntimeError: if ``args.trainer`` is not one of the known run modes.
    """
    if args.seed is not None:
        utils.set_seed(args.seed)

    # Find the first free "<exp_name>~try=<i>" directory.  The exists() check
    # alone is race-prone (another process may create the directory between
    # the check and makedirs), so FileExistsError is treated as "taken" and
    # the search simply continues with the next index.
    i = 0
    while True:
        run_base_dir = pathlib.Path(f"{args.log_dir}/{args.exp_name}~try={i}")
        if not run_base_dir.exists():
            try:
                os.makedirs(run_base_dir)
            except FileExistsError:
                i += 1
                continue
            args.exp_name = args.exp_name + f"~try={i}"
            break
        i += 1
    (run_base_dir / "settings.txt").write_text(str(args))
    args.run_base_dir = run_base_dir

    global logger
    logger.add_file(os.path.join(args.run_base_dir, "train.log"))
    logger.info(args)
    logger.info(f"=> Saving data in {run_base_dir}")

    # Dispatch table for the five supported run modes.
    run_modes = {
        "trainer_task_first": run_task_first,
        "trainer_batch_first": run_batch_first,
        "dense": dense,
        "sswarmup": sswarmup,
        "ss": ss,
    }
    run_fn = run_modes.get(args.trainer)
    if run_fn is None:
        raise RuntimeError(
            'unknown run mode, should be trainer_task_first,trainer_batch_first,dense,sswarmup or ss'
        )
    run_fn()


def run_batch_first():
    """Train with the batch-first trainer on a single shared model.

    Builds the dataset and model from ``args``, optionally assigns per-layer
    ER sparsity, delegates the full training loop to
    ``trainers.trainer_batch_first.train_cv``, and optionally checkpoints
    the final model to ``final.pt``.
    """
    data_loader = getattr(data_creator, args.dataset)()
    model = utils.get_model(args.model)

    if args.er_sparsity:
        # Erdos-Renyi-style sparsity: proportional to (fan_out + fan_in)
        # divided by the total number of weights, capped at 0.5.
        # NOTE(review): assumes every sparsifiable module has a 4-D conv
        # weight (size(2)/size(3) would raise on Linear layers) -- confirm.
        for name, module in model.named_modules():
            if hasattr(module, "sparsity"):
                w = module.weight
                module.sparsity = min(
                    0.5,
                    args.sparsity
                    * (w.size(0) + w.size(1))
                    / (w.size(0) * w.size(1) * w.size(2) * w.size(3)),
                )
                # Log via `logger` for consistency with run_task_first/dense
                # (this function previously used a bare print()).
                logger.info(f"Set sparsity of {name} to {module.sparsity}")

    model = model.to(args.device)
    writer = SummaryWriter(log_dir=args.run_base_dir)

    trainer = getattr(trainers, "trainer_batch_first")
    # Trainer modules may expose an optional one-time init hook.
    if hasattr(trainer, "init"):
        trainer.init(args)

    # Clear any stale gradients before handing the model to the trainer.
    for p in model.parameters():
        p.grad = None

    trainer.train_cv(model, data_loader, writer)

    if args.save:
        torch.save(
            {
                "epoch": args.epochs,
                "arch": args.model,
                "state_dict": model.state_dict(),
                "args": args,
            },
            args.run_base_dir / "final.pt",
        )


def run_task_first():
    """Sequentially train one shared model on every task (task-first order).

    For each task index: broadcast the task id to all modules, select the
    task's trainable parameters, build a fresh optimizer/scheduler, train
    for ``args.epochs`` epochs, and record best/last dev accuracy to CSV.
    Relies on module-level ``args`` and ``logger`` (the latter is not
    defined in this file -- presumably set up elsewhere; TODO confirm).
    """
    data_loader = getattr(data_creator, args.dataset)()

    # Per-task accuracy bookkeeping.  If args.num_tasks is falsy these lists
    # are never created, but the task loop below is also skipped, so they
    # are only read when they exist.
    if args.num_tasks:
        best_acc1 = [0.0 for _ in range(args.num_tasks)]
        curr_acc1 = [0.0 for _ in range(args.num_tasks)]

    model = utils.get_model(args.model) # GEMResNet18

    if args.er_sparsity:
        # Erdos-Renyi sparsity: (fan_out + fan_in) / total weight count,
        # capped at 0.5.  Assumes a 4-D conv weight tensor
        # (size(2)/size(3)) -- TODO confirm for non-conv layers.
        for n, m in model.named_modules():
            if hasattr(m, "sparsity"):
                m.sparsity = min(
                    0.5,
                    args.sparsity * (m.weight.size(0) + m.weight.size(1)) /
                    (m.weight.size(0) * m.weight.size(1) * m.weight.size(2) *
                     m.weight.size(3)),
                )
                logger.info(f"Set sparsity of {n} to {m.sparsity}")

    model = model.to(args.device)
    criterion = nn.CrossEntropyLoss().to(args.device)
    writer = SummaryWriter(log_dir=args.run_base_dir)

    num_tasks_learned = 0

    trainer = getattr(trainers, "trainer_task_first")
    # print(f"=> Using trainer {trainer}")

    train, test = trainer.train, trainer.test

    # Initialize model specific context (editorial note: avoids polluting main file)
    if hasattr(trainer, "init"):
        trainer.init(args)

    for idx in range(args.num_tasks or 0):
        logger.info(f"\nTask {args.dataset}: {idx}\n")

        # Broadcast the active task id to every module that tracks it.
        model.apply(lambda m: setattr(m, "task", idx))

        assert hasattr(
            data_loader, "update_task"
        ), "[ERROR] Need to implement update task method for use with multitask experiments"

        data_loader.update_task(idx)

        # Drop gradients carried over from the previous task.
        for p in model.parameters():
            p.grad = None

        # Select which parameters to optimize for this task:
        #  - per-task score/threshold tensors ("scores"/"s"/"t") whose
        #    trailing index matches the task id (or all of them for "nns"
        #    trainers);
        #  - plain weights/biases/thresholds, but only while weight training
        #    is still allowed (train_weight_tasks < 0 means "always").
        params = []
        for n, p in model.named_parameters():
            if not p.requires_grad:
                continue
            split = n.split(".")
            if split[-2] in ["scores", "s", "t"
                             ] and (int(split[-1]) == idx or
                                    (args.trainer and "nns" in args.trainer)):
                params.append(p)
            if (args.train_weight_tasks < 0
                    or num_tasks_learned < args.train_weight_tasks):
                if split[-1] == "weight" or split[-1] == "bias" or split[
                        -1] == 'threshold':
                    params.append(p)

        # Use the dedicated weight LR while weights are still being trained,
        # otherwise fall back to the generic args.lr.
        lr = (args.train_weight_lr if args.train_weight_tasks < 0
              or num_tasks_learned < args.train_weight_tasks else args.lr)

        if args.optimizer == "adam":
            optimizer = optim.Adam(params, lr=lr, weight_decay=args.wd)
        elif args.optimizer == "rmsprop":
            optimizer = optim.RMSprop(params, lr=lr)
        else:
            # Default optimizer is SGD with momentum.
            optimizer = optim.SGD(params,
                                  lr=lr,
                                  momentum=args.momentum,
                                  weight_decay=args.wd)

        train_epochs = args.epochs

        if args.no_scheduler:
            scheduler = None
        else:
            scheduler = CosineAnnealingLR(optimizer, T_max=train_epochs) # should be revised

        for epoch in range(1, train_epochs + 1):
            train(
                model,
                writer,
                data_loader.train_loader,
                optimizer,
                criterion,
                epoch,
                idx,
                data_loader,
            )

            curr_acc1[idx] = test(model, writer, criterion,
                                  data_loader.dev_loader, epoch, idx)
            if curr_acc1[idx] > best_acc1[idx]:
                best_acc1[idx] = curr_acc1[idx]
            if scheduler:
                scheduler.step()

            # Optional hard cap on total optimizer iterations per task.
            if (args.iter_lim > 0
                    and len(data_loader.train_loader) * epoch > args.iter_lim):
                break

        utils.write_result_to_csv_dense(
            name=f"{args.exp_name}~set={args.dataset}~task={idx}",
            curr_acc1=curr_acc1[idx],
            best_acc1=best_acc1[idx],
            save_dir=args.run_base_dir,
        )

        # Free per-task training state before the next task.
        del optimizer, scheduler, params

        num_tasks_learned += 1

        # Tell every module how many tasks have been learned so far; "nns"
        # trainers cap the count at num_tasks instead of incrementing.
        if args.trainer and "nns" in args.trainer:
            model.apply(lambda m: setattr(
                m, "num_tasks_learned",
                min(model.num_tasks_learned, args.num_tasks)))
        else:
            model.apply(
                lambda m: setattr(m, "num_tasks_learned", num_tasks_learned))

    if args.save:
        torch.save(
            {
                "epoch": args.epochs,
                "arch": args.model,
                "state_dict": model.state_dict(),
                "best_acc1": best_acc1,
                "curr_acc1": curr_acc1,
                "args": args,
            },
            args.run_base_dir / "final.pt",
        )


def dense():
    """Train one fresh (dense) model per task with the task-first trainer.

    Unlike ``run_task_first``, a brand-new model is constructed inside the
    task loop, so tasks never share learned weights.  Only the model from
    the final task is checkpointed at the end.
    """
    data_loader = getattr(data_creator, args.dataset)()

    if args.num_tasks: # wm:20
        best_acc1 = [0.0 for _ in range(args.num_tasks)]
        curr_acc1 = [0.0 for _ in range(args.num_tasks)]

    for idx in range(args.num_tasks or 0):

        # Fresh model for every task (dense baseline -- no weight sharing).
        model = utils.get_model(args.model)
        # NOTE(review): debug leftover -- prints every module name per task.
        for n, m in model.named_modules():
            print(n)

        if args.er_sparsity:
            # Erdos-Renyi sparsity: (fan_out + fan_in) / total weight count,
            # capped at 0.5; assumes a 4-D conv weight -- TODO confirm.
            for n, m in model.named_modules():
                if hasattr(m, "sparsity"):
                    m.sparsity = min(
                        0.5,
                        args.sparsity * (m.weight.size(0) + m.weight.size(1)) /
                        (m.weight.size(0) * m.weight.size(1) * m.weight.size(2) *
                         m.weight.size(3)),
                    )
                    logger.info(f"Set sparsity of {n} to {m.sparsity}")

        model = model.to(args.device)
        criterion = nn.CrossEntropyLoss().to(args.device)
        writer = SummaryWriter(log_dir=args.run_base_dir)
        # Always 0 here: the counter is function-local and never incremented
        # before the LR/params decisions below, so weights always train.
        num_tasks_learned = 0
        trainer = getattr(trainers, "trainer_task_first")

        train, test = trainer.train, trainer.test

        # Initialize model specific context (editorial note: avoids polluting main file)
        if hasattr(trainer, "init"):
            trainer.init(args)

        logger.info(f"\nTask {args.dataset}: {idx}\n")

        # Broadcast the active task id to every module that tracks it.
        model.apply(lambda m: setattr(m, "task", idx))

        # Update the data loader so that it returns the data for the correct task, also done by passing the task index.
        assert hasattr(
            data_loader, "update_task"
        ), "[ERROR] Need to implement update task method for use with multitask experiments"

        data_loader.update_task(idx)

        # Drop any stale gradients on the freshly built model.
        for p in model.parameters():
            p.grad = None

        # Parameter selection mirrors run_task_first: per-task score tensors
        # matching this task id, plus plain weights/biases/thresholds while
        # weight training is allowed.
        params = []
        for n, p in model.named_parameters():
            if not p.requires_grad:
                continue
            split = n.split(".")
            if split[-2] in ["scores", "s", "t"
                             ] and (int(split[-1]) == idx or
                                    (args.trainer and "nns" in args.trainer)):
                params.append(p)

            if (args.train_weight_tasks < 0
                    or num_tasks_learned < args.train_weight_tasks):
                if split[-1] == "weight" or split[-1] == "bias" or split[
                        -1] == 'threshold':
                    params.append(p)

        # Weight LR while weight training is allowed, else the generic LR.
        lr = (args.train_weight_lr if args.train_weight_tasks < 0
              or num_tasks_learned < args.train_weight_tasks else args.lr)

        if args.optimizer == "adam":
            optimizer = optim.Adam(params, lr=lr, weight_decay=args.wd)
        elif args.optimizer == "rmsprop":
            optimizer = optim.RMSprop(params, lr=lr)
        else:
            # Default optimizer is SGD with momentum.
            optimizer = optim.SGD(params,
                                  lr=lr,
                                  momentum=args.momentum,
                                  weight_decay=args.wd)
        train_epochs = args.epochs
        if args.no_scheduler:
            scheduler = None
        else:
            scheduler = CosineAnnealingLR(optimizer, T_max=train_epochs) # should be revised

        for epoch in range(1, train_epochs + 1):
            train(
                model,
                writer,
                data_loader.train_loader,
                optimizer,
                criterion,
                epoch,
                idx,
                data_loader,
            )

            curr_acc1[idx] = test(model, writer, criterion,
                                  data_loader.dev_loader, epoch, idx)
            if curr_acc1[idx] > best_acc1[idx]:
                best_acc1[idx] = curr_acc1[idx]
            if scheduler:
                scheduler.step()

            # Optional hard cap on total optimizer iterations per task.
            if (args.iter_lim > 0
                    and len(data_loader.train_loader) * epoch > args.iter_lim):
                break

        utils.write_result_to_csv_dense(
            name=f"{args.exp_name}~set={args.dataset}~task={idx}",
            curr_acc1=curr_acc1[idx],
            best_acc1=best_acc1[idx],
            save_dir=args.run_base_dir,
        )

        # Free per-task training state before the next task.
        del optimizer, scheduler, params

        num_tasks_learned += 1

        # Propagate the learned-task count; "nns" trainers cap it instead.
        if args.trainer and "nns" in args.trainer:
            model.apply(lambda m: setattr(
                m, "num_tasks_learned",
                min(model.num_tasks_learned, args.num_tasks)))
        else:
            model.apply(
                lambda m: setattr(m, "num_tasks_learned", num_tasks_learned))

    # Only the last task's model survives the loop and gets saved.
    if args.save:
        torch.save(
            {
                "epoch": args.epochs,
                "arch": args.model,
                "state_dict": model.state_dict(),
                "best_acc1": best_acc1,
                "curr_acc1": curr_acc1,
                "args": args,
            },
            args.run_base_dir / "final.pt",
        )


def sswarmup():
    """Warm-up phase of the sparse-subnetwork (SS) pipeline.

    Builds the model, optionally applies ER sparsity and previously saved
    per-task masks from disk, then delegates the full training loop to
    ``trainers.sswarmup.train_cv`` and optionally checkpoints the result.
    """
    data_loader = getattr(data_creator, args.dataset)()

    model = utils.get_model(args.model)

    if args.er_sparsity:
        # Erdos-Renyi sparsity: (fan_out + fan_in) / total weight count.
        # NOTE(review): assumes a 4-D conv weight (size(2)/size(3)) -- would
        # raise on 2-D Linear weights; confirm against the model zoo.
        for n, m in model.named_modules():
            if hasattr(m, "sparsity"):
                w = m.weight
                m.sparsity = min(
                    0.5,
                    args.sparsity
                    * (w.size(0) + w.size(1))
                    / (w.size(0) * w.size(1) * w.size(2) * w.size(3)),
                )
                print(f"Set sparsity of {n} to {m.sparsity}")

    model = model.to(args.device)

    if args.update_mask:  # idiomatic truthiness test (was `== True`)
        # Loop-invariant pruning rate, hoisted out of the per-task loop.
        # NOTE: the `1 - (1 - x)` form is mathematically just x, but it is
        # kept verbatim so float rounding -- and hence the "%.2f"-formatted
        # filename used to locate masks on disk -- matches the writer side.
        cur_rate = 1 - (1 - (args.final_rate ** (args.pruning_iter / args.prune_iter)))
        for idx in range(args.num_tasks):
            # Single source of truth for the mask path (previously the same
            # string was built twice: once to load, once to log).
            mask_path = os.path.join(
                args.save_model_path,
                "single_mask/mask_{}_{}_{:.2f}.th".format(
                    idx, args.pruning_iter, cur_rate))
            save_mask = torch.load(mask_path)
            model_dict = model.state_dict()
            # Overlay only mask entries whose keys exist in this model.
            state_dict = {k: v for k, v in save_mask.items() if k in model_dict}
            model_dict.update(state_dict)
            model.load_state_dict(model_dict)
            logger.info(mask_path)
            print("update_mask：{} done".format(idx))

    writer = SummaryWriter(log_dir=args.run_base_dir)

    trainer = getattr(trainers, "sswarmup")

    # Trainer modules may expose an optional one-time init hook.
    if hasattr(trainer, "init"):
        trainer.init(args)

    # Clear stale gradients before handing the model to the trainer.
    for p in model.parameters():
        p.grad = None

    trainer.train_cv(model, data_loader, writer)

    if args.save:
        torch.save(
            {
                "epoch": args.epochs,
                "arch": args.model,
                "state_dict": model.state_dict(),
                "args": args,
            },
            args.run_base_dir / "final.pt",
        )


def ss():
    """Iterative pruning loop of the sparse-subnetwork (SS) pipeline.

    For every task and every pruning round: reload the warm-up checkpoint,
    overlay the mask produced by the previous round, retrain, and (at epoch
    100) write the next, sparser mask to disk via
    ``utils.single_generate_mask``.
    """
    data_loader = getattr(data_creator, args.dataset)()

    if args.num_tasks: # wm:20
        best_acc1 = [0.0 for _ in range(args.num_tasks)]
        curr_acc1 = [0.0 for _ in range(args.num_tasks)]

    criterion = nn.CrossEntropyLoss().to(args.device)
    writer = SummaryWriter(log_dir=args.run_base_dir)

    trainer = getattr(trainers, "trainer_task_first")
    train, test = trainer.train, trainer.test

    if hasattr(trainer, "init"):
        trainer.init(args)

    for idx in range(args.num_tasks or 0):
        for pruning_iter in range(args.prune_iter):
            print('pruning_iter',pruning_iter)
            # Every pruning round restarts from the same warm-up checkpoint.
            # NOTE(review): "warmup/warmup_9.th" is hard-coded -- confirm it
            # matches what the sswarmup phase actually writes.
            model = utils.get_model(args.model)
            utils.load_model(model, os.path.join(args.save_model_path,'warmup/warmup_9.th'))
            model = model.to(args.device)
            # Shift to 1-based round numbering used in mask filenames.
            pruning_iter = pruning_iter + 1
            if pruning_iter > 1 :
                # From round 2 on, overlay the previous round's mask.
                # `1 - (1 - x)` equals x mathematically; kept verbatim so
                # the "%.2f"-formatted filename matches the writer side.
                cur_rate = 1 - (1 - (args.final_rate**((pruning_iter-1) / args.prune_iter)))
                save_mask = torch.load(os.path.join(
                    args.save_model_path, "single_mask/mask_{}_{}_{:.2f}.th".format(idx, pruning_iter-1, cur_rate)))
                model_dict = model.state_dict()
                # Overlay only mask entries whose keys exist in this model.
                state_dict = {k: v for k, v in save_mask.items() if k in model_dict.keys()}
                model_dict.update(state_dict)
                model.load_state_dict(model_dict)

            logger.info(f"\nTask {args.dataset}: {idx}\n")

            args.cur_task_id = idx
            assert hasattr(
                data_loader, "update_task"
            ), "[ERROR] Need to implement update task method for use with multitask experiments"

            data_loader.update_task(idx)

            # Drop stale gradients on the freshly loaded model.
            for p in model.parameters():
                p.grad = None

            # Only raw weights/biases/thresholds are trained here -- no
            # per-task score tensors, unlike run_task_first.
            params = []
            for n, p in model.named_parameters():
                if not p.requires_grad:
                    continue
                split = n.split(".")
                if split[-1] == "weight" or split[-1] == "bias" or split[
                    -1] == 'threshold':
                    params.append(p)

            # Weight LR is always used in this phase.
            lr = (
                args.train_weight_lr
            )

            if args.optimizer == "adam":
                optimizer = optim.Adam(params, lr=lr, weight_decay=args.wd)
            elif args.optimizer == "rmsprop":
                optimizer = optim.RMSprop(params, lr=lr)
            else:
                # Default optimizer is SGD with momentum.
                optimizer = optim.SGD(
                    params, lr=lr, momentum=args.momentum, weight_decay=args.wd
                )

            train_epochs = args.epochs

            if args.no_scheduler:
                scheduler = None
            else:
                scheduler = CosineAnnealingLR(optimizer, T_max=train_epochs)

            for epoch in range(1, train_epochs + 1):
                train(
                    model,
                    writer,
                    data_loader.train_loader,
                    optimizer,
                    criterion,
                    epoch,
                    idx,
                    data_loader,
                )

                curr_acc1[idx] = test(model, writer, criterion,
                                      data_loader.dev_loader, epoch, idx)
                if curr_acc1[idx] > best_acc1[idx]:
                    best_acc1[idx] = curr_acc1[idx]

                if scheduler:
                    scheduler.step()

                # Optional hard cap on total optimizer iterations.
                if (args.iter_lim > 0
                        and len(data_loader.train_loader) * epoch > args.iter_lim):
                    break

                # NOTE(review): mask generation is pinned to epoch 100 --
                # silently skipped if args.epochs < 100; confirm intended.
                if epoch == 100:
                    utils.single_generate_mask(model, idx, pruning_iter, args.final_rate, args.prune_iter)

            # Free per-round training state.
            del optimizer, scheduler, params

        # Checkpoint once per task; holds the final pruning round's model.
        if args.save:
            torch.save(
                {
                    "epoch": args.epochs,
                    "arch": args.model,
                    "state_dict": model.state_dict(),
                    "best_acc1": best_acc1,
                    "curr_acc1": curr_acc1,
                    "args": args,
                },
                args.run_base_dir / "final.pt",
            )



# Script entry point; all configuration comes from the imported `args` object.
if __name__ == "__main__":
    main()
