import torch
import torch.optim as optim
from args import args
from torch.optim.lr_scheduler import CosineAnnealingLR,ExponentialLR
import models.module_util as module_util



def prepare_optimizers(model, idx):
    """Build optimizers for the weight parameters and for the score (mask)
    parameters belonging to task ``idx``.

    Weights/biases are only collected (and trained) when
    ``args.train_weight_tasks < 0``; score tensors are parameters whose name
    contains ``'score'`` and whose final dotted component equals ``idx``
    (names look like ``"<module>.scores.<task_idx>"``).

    Returns
    -------
    (optimizer_w, optimizer_m) when ``args.train_weight_tasks < 0``
    (``optimizer_w`` is ``None`` if no weight params were collected),
    otherwise just ``optimizer_m``.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0:
            if split[-1] == "weight" or split[-1] == "bias":
                params_w.append(p)
        # 'score' also matches 'scores'; the last name component is the task id.
        if n.find('score') != -1 and int(split[-1]) == idx:
            params_m.append(p)

    # BUG FIX: previously optimizer_w was left undefined when params_w was
    # empty under "adam" (NameError at the return below), and the rmsprop/SGD
    # branches passed a possibly-empty params_w to torch (ValueError: empty
    # parameter list).  Guard every branch the same way.
    optimizer_w = None
    if args.optimizer == "adam":
        if params_w:
            optimizer_w = optim.Adam(params_w,
                                     lr=args.train_weight_lr,
                                     weight_decay=args.wd)
        optimizer_m = optim.Adam(params_m,
                                 lr=args.train_score_lr,
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        if params_w:
            optimizer_w = optim.RMSprop(params_w, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(params_m, lr=args.train_score_lr)
    else:
        if params_w:
            optimizer_w = optim.SGD(params_w,
                                    lr=args.train_weight_lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
        optimizer_m = optim.SGD(params_m,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    if args.train_weight_tasks < 0:
        return optimizer_w, optimizer_m
    else:
        return optimizer_m


def add_sparse_term_v3(loss, model, i_epoch):
    """Add the ccis positive-sparsity penalty to ``loss``.

    Sums every positive entry of each parameter whose name contains
    ``'scores'`` and adds ``args.alpha`` times that sum to the loss.
    ``i_epoch`` is accepted for signature parity with the other
    ``add_sparse_term_*`` variants but is not used here.
    """
    penalty = torch.tensor(0.).to(args.device)
    for pname, pvalue in model.named_parameters():
        if 'scores' in pname:
            # torch.gt(p, 0) * p keeps positive entries and zeroes the rest.
            penalty = penalty + torch.sum(torch.gt(pvalue, 0) * pvalue)
    return loss + args.alpha * penalty


def add_sparse_term_v4(loss, model, i_epoch):
    """Add an L2 penalty on score parameters to ``loss``.

    For every parameter whose name contains ``'scores'``, accumulates
    ``sum((p - args.intercept)**2)`` and adds ``args.alpha`` times the total
    to the loss.  ``i_epoch`` is unused (kept for signature parity with the
    other ``add_sparse_term_*`` variants).
    """
    reg = torch.tensor(0.).to(args.device)
    for pname, pvalue in model.named_parameters():
        if 'scores' in pname:
            reg = reg + torch.sum(torch.pow(pvalue - args.intercept, 2))
    return loss + args.alpha * reg


def alpha(input, i_epoch):
    """Gate ``input`` by the sparsity schedule.

    Returns ``input`` only while ``i_epoch < args.alpha_epoch`` AND the epoch
    falls in the "on" portion of each cycle of length ``args.alpha_epoch_1``
    (the first ``args.alpha_epoch_2`` epochs of the cycle); otherwise 0.
    """
    before_cutoff = i_epoch < args.alpha_epoch
    in_on_phase = (i_epoch % args.alpha_epoch_1) < args.alpha_epoch_2
    return input if (before_cutoff and in_on_phase) else 0


def mask_status(model, task_id):
    """Compute the binary masks for task ``task_id``'s score parameters.

    First records each sparsity-aware module's ``sparsity`` under every
    per-task score name (``"<module>.scores.<i>"``), then derives a mask per
    matching parameter according to ``args.pruning_method``:

    - ``'topK'``: ``module_util.GetSubnet.apply(|p|, sparsity)``
    - ``'threshold'``: entries with ``p >= 0`` kept (as floats)

    Returns a dict mapping parameter name -> mask tensor (empty if the
    pruning method is neither of the above).
    """
    masks = {}
    m_sparsity = {}

    for mod_name, mod in model.named_modules():
        if hasattr(mod, "sparsity"):
            for i in range(args.num_tasks):
                m_sparsity[mod_name + ".scores" + '.' + str(i)] = mod.sparsity

    wanted_task = str(task_id)
    for pname, pvalue in model.named_parameters():
        parts = pname.split(".")
        # Only parameters named "...scores.<task_id>" are of interest.
        if parts[-2] != "scores" or parts[-1] != wanted_task:
            continue
        if args.pruning_method == 'topK':
            masks[pname] = module_util.GetSubnet.apply(
                pvalue.abs(), m_sparsity[pname]
            )
        elif args.pruning_method == 'threshold':
            masks[pname] = (pvalue >= 0.).float()
    return masks


class LRcheduler_wm():
    """Two-phase learning-rate schedule on a single optimizer.

    Phase 1 (first ``lr_epochs`` steps): cosine annealing down to
    ``args.lr1_min`` over ``T_max_1`` steps.  Phase 2 (afterwards):
    exponential decay with gamma=0.9.  Both underlying schedulers are
    constructed up front on the same optimizer.
    """

    def __init__(self, optimizer, T_max_1, lr_epochs = args.lr_epochs):
        self.lr_epochs = lr_epochs
        self.LRcheduler_1 = CosineAnnealingLR(optimizer, T_max=T_max_1, eta_min=args.lr1_min)
        self.LRcheduler_2 = ExponentialLR(optimizer, gamma=0.9)
        self.step_num = 0

    def _active_scheduler(self):
        """Return the scheduler for the current phase (cosine, then exp)."""
        if self.step_num < self.lr_epochs:
            return self.LRcheduler_1
        return self.LRcheduler_2

    def step(self):
        """Advance the active scheduler by one step and count it."""
        scheduler = self._active_scheduler()
        self.step_num += 1
        return scheduler.step()

    def get_lr(self):
        """Delegate to the active scheduler's ``get_lr``."""
        return self._active_scheduler().get_lr()