import torch
import torch.optim as optim
from args import args
from torch.optim.lr_scheduler import CosineAnnealingLR,ExponentialLR
import models.module_util as module_util
from fastNLP import logger


def prepare_optimizers(model, idx):
    """Build optimizer(s) for the model weights and the task-``idx`` score masks.

    Parameters are partitioned by name: parameters whose last name component
    is ``weight``/``bias`` go to the weight optimizer (collected only when
    ``args.train_weight_tasks < 0``), and parameters whose name contains
    ``score`` and whose last component equals ``idx`` go to the score/mask
    optimizer.

    Args:
        model: module exposing ``named_parameters()``.
        idx: task index selecting which per-task score parameters to train.

    Returns:
        ``(optimizer_w, optimizer_m)`` when ``args.train_weight_tasks < 0``,
        otherwise just ``optimizer_m``.

    Fix over original: the empty-``params_w`` guard existed only in the Adam
    branch, so adam + empty ``params_w`` raised UnboundLocalError at the
    return, and rmsprop/sgd with ``train_weight_tasks >= 0`` constructed an
    optimizer over an empty parameter list (torch raises ValueError) even
    though that optimizer was immediately discarded. ``optimizer_w`` is now
    built only when it is actually returned.
    """
    params_w, params_m = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        split = n.split(".")
        if args.train_weight_tasks < 0 and split[-1] in ("weight", "bias"):
            params_w.append(p)
        # NOTE(review): assumes score parameter names end with a numeric task
        # index (e.g. "...scores.0") — int() raises otherwise, as before.
        if 'score' in n and int(split[-1]) == idx:
            params_m.append(p)

    # One factory per optimizer family; rmsprop intentionally takes no
    # weight decay / momentum, matching the original configuration.
    if args.optimizer == "adam":
        def make(ps, lr):
            return optim.Adam(ps, lr=lr, weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        def make(ps, lr):
            return optim.RMSprop(ps, lr=lr)
    else:
        def make(ps, lr):
            return optim.SGD(ps, lr=lr,
                             momentum=args.momentum,
                             weight_decay=args.wd)

    optimizer_m = make(params_m, args.train_score_lr)
    if args.train_weight_tasks < 0:
        # Only construct the weight optimizer when the caller receives it.
        optimizer_w = make(params_w, args.train_weight_lr)
        return optimizer_w, optimizer_m
    return optimizer_m

def prepare_optimizers_nlp(model, idx):
    """Build the (weight, score) optimizer pair for task ``idx`` of an NLP model.

    Weight parameters are those whose name contains ``weight`` or ``bias``
    (collected only when ``args.train_weight_tasks < 0``); score parameters
    are those whose parent module is named ``score_<idx>``.

    Returns:
        ``(optimizer_w, optimizer_m)`` — weight optimizer first.
    """
    weight_params = []
    score_params = []
    score_key = "score_{}".format(idx)
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if args.train_weight_tasks < 0 and ("weight" in name or "bias" in name):
            weight_params.append(param)
        if name.split(".")[-2] == score_key:
            score_params.append(param)

    if args.optimizer == "adam":
        # train_weight_lr is typically set to 0 in this configuration.
        optimizer_w = optim.Adam(weight_params,
                                 lr=args.train_weight_lr,
                                 weight_decay=args.wd)
        optimizer_m = optim.Adam(score_params,
                                 lr=args.train_score_lr,
                                 weight_decay=args.wd)
    elif args.optimizer == "rmsprop":
        optimizer_w = optim.RMSprop(weight_params, lr=args.train_weight_lr)
        optimizer_m = optim.RMSprop(score_params, lr=args.train_score_lr)
    else:
        optimizer_w = optim.SGD(weight_params,
                                lr=args.train_weight_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)
        optimizer_m = optim.SGD(score_params,
                                lr=args.train_score_lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    return optimizer_w, optimizer_m

def prepare_weight_optimizers_nlp(model):
    """Build a single optimizer over every trainable weight/bias parameter.

    A parameter qualifies when it requires grad and its name contains
    ``weight`` or ``bias``.
    """
    weights = [
        param
        for name, param in model.named_parameters()
        if param.requires_grad and ("weight" in name or "bias" in name)
    ]

    if args.optimizer == "adam":
        return optim.Adam(weights,
                          lr=args.train_weight_lr,
                          weight_decay=args.wd)
    if args.optimizer == "rmsprop":
        return optim.RMSprop(weights, lr=args.train_weight_lr)
    return optim.SGD(weights,
                     lr=args.train_weight_lr,
                     momentum=args.momentum,
                     weight_decay=args.wd)

def prepare_score_optimizers_nlp(model):
    """Build a single optimizer over every trainable score (mask) parameter.

    A parameter qualifies when it requires grad and its name contains
    ``score``.
    """
    scores = [
        param
        for name, param in model.named_parameters()
        if param.requires_grad and "score" in name
    ]

    if args.optimizer == "adam":
        return optim.Adam(scores,
                          lr=args.train_score_lr,
                          weight_decay=args.wd)
    if args.optimizer == "rmsprop":
        return optim.RMSprop(scores, lr=args.train_score_lr)
    return optim.SGD(scores,
                     lr=args.train_score_lr,
                     momentum=args.momentum,
                     weight_decay=args.wd)

def add_sparse_term_v3(loss, model, i_epoch):
    """Add a positive-mass sparsity penalty (ccis) on score parameters.

    Only positive score entries contribute: the penalty is
    ``args.alpha * sum(max(score, 0))`` over all parameters whose name
    contains 'scores'. ``i_epoch`` is unused; kept for signature parity with
    the other ``add_sparse_term_*`` variants.
    """
    penalty = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'scores' not in name:
            continue
        # torch.gt(param, 0) zeroes out non-positive entries.
        penalty = penalty + torch.sum(torch.gt(param, 0) * param)
    return loss + args.alpha * penalty


def add_sparse_term_v4(loss, model, i_epoch):
    """Add an L2 penalty pulling score parameters toward ``args.intercept``.

    The penalty is ``args.alpha * sum((score - intercept)^2)`` over all
    parameters whose name contains 'scores'. ``i_epoch`` is unused; kept for
    signature parity with the other ``add_sparse_term_*`` variants.
    """
    penalty = torch.tensor(0.).to(args.device)
    for name, param in model.named_parameters():
        if 'scores' not in name:
            continue
        penalty = penalty + torch.sum((param - args.intercept) ** 2)
    return loss + args.alpha * penalty


def alpha(input, i_epoch):
    """Gate ``input`` on or off according to the epoch schedule.

    Returns ``input`` only while ``i_epoch < args.alpha_epoch`` AND the epoch
    falls in the first ``args.alpha_epoch_2`` steps of each
    ``args.alpha_epoch_1``-long cycle; otherwise returns 0.
    """
    before_cutoff = i_epoch < args.alpha_epoch
    in_active_phase = (i_epoch % args.alpha_epoch_1) < args.alpha_epoch_2
    return input if (before_cutoff and in_active_phase) else 0


def mask_status(model, task_id):
    """Compute the binary masks currently selected for ``task_id``.

    First collects each pruning module's ``sparsity`` attribute, keyed by the
    full score-parameter name ``<module>.scores.<task>``; then derives a mask
    per score parameter of ``task_id`` using the configured pruning method
    ('topK' via GetSubnet, or 'threshold' via sign).

    Returns:
        dict mapping score-parameter name -> binary mask tensor. Empty when
        ``args.pruning_method`` is neither 'topK' nor 'threshold'.
    """
    sparsity_by_name = {}
    for mod_name, mod in model.named_modules():
        if not hasattr(mod, "sparsity"):
            continue
        for task in range(args.num_tasks):
            sparsity_by_name["{}.scores.{}".format(mod_name, task)] = mod.sparsity

    masks = {}
    for name, param in model.named_parameters():
        parts = name.split(".")
        if parts[-2] != "scores" or parts[-1] != str(task_id):
            continue
        if args.pruning_method == 'topK':
            masks[name] = module_util.GetSubnet.apply(
                param.abs(), sparsity_by_name[name]
            )
        elif args.pruning_method == 'threshold':
            masks[name] = (param >= 0.).float()
    return masks


class LRcheduler_wm():
    """Two-phase learning-rate schedule for a single optimizer.

    Cosine annealing (down to ``args.lr1_min``) is applied for the first
    ``lr_epochs`` calls to :meth:`step`; every call after that applies
    exponential decay with gamma=0.9 instead.
    """

    def __init__(self, optimizer, T_max, lr_epochs = args.lr_epochs):
        self.lr_epochs = lr_epochs
        # Both schedulers wrap the same optimizer; only one is stepped
        # per call, selected by the step counter.
        self.LRcheduler_1 = CosineAnnealingLR(optimizer, T_max=T_max, eta_min=args.lr1_min)
        self.LRcheduler_2 = ExponentialLR(optimizer, gamma=0.9)
        self.step_num = 0

    def _active(self):
        # Scheduler in effect for the current phase.
        if self.step_num < self.lr_epochs:
            return self.LRcheduler_1
        return self.LRcheduler_2

    def step(self):
        """Advance the schedule by one epoch using the active scheduler."""
        scheduler = self._active()
        self.step_num += 1
        return scheduler.step()

    def get_lr(self):
        """Return the learning rate(s) from the active scheduler."""
        return self._active().get_lr()

def mask_status_nlp(model, writer, i_epoch, task_lst):
    """Log per-task subnetwork sizes and return the mean kept-parameter count.

    For each task, counts the active entries of every ``score_<task>``
    parameter — via sign (``> 0``) when ``args.threshold_pruning`` is set,
    otherwise via top-k selection at ``args.sparsity`` — logs the count,
    stores it in ``args.num_params`` (for the csv report), and writes the
    kept/total ratio to TensorBoard.

    Returns:
        total kept parameters across all tasks divided by ``len(task_lst)``.
    """
    total_kept = 0
    for task in range(args.num_tasks):
        task_total = 0
        kept = 0
        score_key = "score_{}".format(task)
        for name, param in model.named_parameters():
            if name.split(".")[-2] != score_key:
                continue
            if args.threshold_pruning:
                mask = (param > 0.).float()
            else:
                mask = module_util.GetSubnet.apply(param.abs(), args.sparsity)
            kept += torch.sum(mask)
            task_total += param.numel()
            total_kept += torch.sum(mask)
        logger.info(f"num_params : task: {task}, num: {kept}")
        args.num_params[task] = kept  # per-task parameter count, reported in the csv
        writer.add_scalar(f"train/task_{task}/overlap", kept / task_total, i_epoch)
    return total_kept / len(task_lst)

def mask_status_nlp_v2(model, writer, epoch, task_lst):
    """Variant of ``mask_status_nlp`` using an epoch-decaying threshold.

    Under ``args.threshold_pruning`` the mask keeps entries strictly above
    ``threshold(epoch)`` (a piecewise schedule that decays toward 0) instead
    of a fixed 0 cutoff; otherwise it falls back to top-k selection at
    ``args.sparsity``. Logging, csv bookkeeping, and TensorBoard output are
    identical to ``mask_status_nlp``.

    Returns:
        total kept parameters across all tasks divided by ``len(task_lst)``.
    """
    # threshold() is pure, so hoist the per-epoch cutoff out of the loops.
    cutoff = threshold(epoch)
    total_kept = 0
    for task in range(args.num_tasks):
        task_total = 0
        kept = 0
        score_key = "score_{}".format(task)
        for name, param in model.named_parameters():
            if name.split(".")[-2] != score_key:
                continue
            if args.threshold_pruning:
                mask = (param > cutoff).float()
            else:
                mask = module_util.GetSubnet.apply(param.abs(), args.sparsity)
            kept += torch.sum(mask)
            task_total += param.numel()
            total_kept += torch.sum(mask)
        logger.info(f"num_params : task: {task}, num: {kept}")
        args.num_params[task] = kept  # per-task parameter count, reported in the csv
        writer.add_scalar(f"train/task_{task}/overlap", kept / task_total, epoch)
    return total_kept / len(task_lst)

def reg_threshold(epoch):
    """Piecewise-constant regularization threshold, decaying every 10 epochs.

    Steps from 0.09 down to -0.01 at epochs 10, 20, 30, and 40; each value is
    0.01 below the corresponding pruning ``threshold`` schedule.
    """
    if epoch < 10:
        return 0.09
    if epoch < 20:
        return 0.065
    if epoch < 30:
        return 0.04
    if epoch < 40:
        return 0.015
    return -0.01

def threshold(epoch):
    """Piecewise-constant pruning threshold, decaying every 10 epochs.

    Steps from 0.1 down to 0.0 at epochs 10, 20, 30, and 40.
    """
    if epoch < 10:
        return 0.1
    if epoch < 20:
        return 0.075
    if epoch < 30:
        return 0.05
    if epoch < 40:
        return 0.025
    return 0.0

def mask_status_nlp_batchfirst(model, task_id):
    """Collect binary masks (score strictly > 0) for task ``task_id``.

    Selects every parameter whose parent module is named
    ``score_<task_id>`` and maps its full name to a 0/1 float mask.
    """
    score_key = "score_{}".format(task_id)
    return {
        name: (param > 0.).float()
        for name, param in model.named_parameters()
        if name.split(".")[-2] == score_key
    }


