import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from args import args as pargs
from scipy.stats import ortho_group
from fastNLP import logger
import numpy as np


import models.module_util as module_util

from fastNLP.embeddings.embedding import TokenEmbedding
from fastNLP.core.vocabulary import Vocabulary
from typing import List
from fastNLP.embeddings.utils import _construct_char_vocab_from_vocab
from fastNLP.embeddings.static_embedding import StaticEmbedding
from fastNLP.embeddings.utils import get_embeddings

# Aliases for the unmodified torch layers, used when no masking/multitask
# variant is wanted.
StandardConv = nn.Conv2d
StandardBN = nn.BatchNorm2d

# The following modules are from
# 'Supermasks in Superposition'


class NonAffineBN(nn.BatchNorm2d):
    """BatchNorm2d with the learnable affine scale/shift disabled."""

    def __init__(self, dim):
        super().__init__(dim, affine=False)


class NonAffineNoStatsBN(nn.BatchNorm2d):
    """BatchNorm2d with no affine parameters and no running statistics
    (always normalizes with the current batch's mean/var)."""

    def __init__(self, dim):
        super().__init__(dim, affine=False, track_running_stats=False)


class MultitaskNonAffineBN(nn.Module):
    """Holds one non-affine BatchNorm2d per task and routes the input through
    the norm selected by the global ``pargs.cur_task_id``."""

    def __init__(self, dim):
        super().__init__()
        per_task = [NonAffineBN(dim) for _ in range(pargs.num_tasks)]
        self.bns = nn.ModuleList(per_task)

    def forward(self, x):
        current_bn = self.bns[pargs.cur_task_id]
        return current_bn(x)


class MaskConv(nn.Conv2d):
    """Conv2d whose weight is gated by a learned binary subnet mask
    (edge-popup style: top ``sparsity`` fraction of ``|scores|`` is kept)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Scores parameterize the mask; the conv weight itself stays frozen
        # unless weight training is explicitly enabled via pargs.
        self.scores = nn.Parameter(module_util.mask_init(self))
        if pargs.train_weight_tasks == 0:
            self.weight.requires_grad = False
        # Default sparsity level for mask selection.
        self.sparsity = pargs.sparsity

    def forward(self, x):
        mask = module_util.GetSubnet.apply(self.scores.abs(), self.sparsity)
        masked_weight = self.weight * mask
        return F.conv2d(x, masked_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)


# Conv from What's Hidden in a Randomly Weighted Neural Network?
class MultitaskMaskConv(nn.Conv2d):
    """Conv2d with one edge-popup score tensor per task over shared,
    (optionally) frozen weights. Supports mask superposition at inference."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One score tensor per task; task t's mask keeps the top `sparsity`
        # fraction of |scores[t]|.
        per_task_scores = [
            nn.Parameter(module_util.mask_init(self))
            for _ in range(pargs.num_tasks)
        ]
        self.scores = nn.ParameterList(per_task_scores)
        # Shared weights stay frozen unless weight training is enabled.
        if pargs.train_weight_tasks == 0:
            self.weight.requires_grad = False
        self.sparsity = pargs.sparsity

    def cache_masks(self):
        """Precompute and buffer the binary mask of every task."""
        masks = [
            module_util.get_subnet(self.scores[j].abs(), self.sparsity)
            for j in range(pargs.num_tasks)
        ]
        self.register_buffer("stacked", torch.stack(masks))

    def clear_masks(self):
        """Drop the cached mask stack."""
        self.register_buffer("stacked", None)

    def forward(self, x):
        # NOTE(review): self.task / self.alphas / self.num_tasks_learned are
        # attached externally (e.g. by the training loop) — confirm.
        if self.task < 0:
            # Unknown task: superimpose cached masks that have positive alpha.
            alphas = self.alphas[:self.num_tasks_learned]
            keep = (alphas > 0).squeeze().view(self.num_tasks_learned)
            if len(keep.shape) == 0:
                keep = keep.view(1)
            subnet = (alphas[keep] *
                      self.stacked[:self.num_tasks_learned][keep]).sum(dim=0)
        else:
            subnet = module_util.GetSubnet.apply(self.scores[self.task].abs(),
                                                 self.sparsity)
        masked_w = self.weight * subnet
        return F.conv2d(x, masked_w, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def __repr__(self):
        return f"MultitaskMaskConv({self.in_channels}, {self.out_channels})"


# Init from What's Hidden with masking from Mallya et al. (Piggyback)
class FastMultitaskMaskConv(nn.Conv2d):
    """Conv2d with one fast (threshold-free) binary mask per task over shared
    weights; Piggyback-style masking with 'What's Hidden' init."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        per_task_scores = [
            nn.Parameter(module_util.mask_init(self))
            for _ in range(pargs.num_tasks)
        ]
        self.scores = nn.ParameterList(per_task_scores)
        # Shared weights stay frozen unless weight training is enabled.
        if pargs.train_weight_tasks == 0:
            self.weight.requires_grad = False

    def cache_masks(self):
        """Precompute and buffer every task's fast binary mask."""
        masks = [
            module_util.get_subnet_fast(self.scores[j])
            for j in range(pargs.num_tasks)
        ]
        self.register_buffer("stacked", torch.stack(masks))

    def clear_masks(self):
        """Drop the cached mask stack."""
        self.register_buffer("stacked", None)

    def forward(self, x):
        # NOTE(review): self.task / self.alphas / self.num_tasks_learned are
        # attached externally before this runs.
        if self.task < 0:
            # Unknown task: superimpose cached masks that have positive alpha.
            alphas = self.alphas[:self.num_tasks_learned]
            keep = (alphas > 0).squeeze().view(self.num_tasks_learned)
            if len(keep.shape) == 0:
                keep = keep.view(1)
            subnet = (alphas[keep] *
                      self.stacked[:self.num_tasks_learned][keep]).sum(dim=0)
        else:
            subnet = module_util.GetSubnetFast.apply(self.scores[self.task])

        masked_w = self.weight * subnet
        return F.conv2d(x, masked_w, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def __repr__(self):
        return f"FastMultitaskMaskConv({self.in_channels}, {self.out_channels})"


class BatchEnsembles(nn.Conv2d):
    """Conv2d with BatchEnsemble-style per-task rank-one weight multipliers
    (subnet = s @ t reshaped to the weight's shape)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Rank-one factors per task: s is a column vector, t a row vector.
        s_factors, t_factors = [], []
        for _ in range(pargs.num_tasks):
            s_factors.append(
                nn.Parameter(module_util.rank_one_init(self).unsqueeze(1)))
            t_factors.append(
                nn.Parameter(module_util.rank_one_initv2(self).unsqueeze(0)))
        self.s = nn.ParameterList(s_factors)
        self.t = nn.ParameterList(t_factors)
        if pargs.train_weight_tasks == 0:
            self.weight.requires_grad = False

    def cache_masks(self):
        """Buffer every task's rank-one multiplier, reshaped like the weight."""
        rank_one = [
            torch.mm(self.s[j], self.t[j]).view(*self.weight.shape)
            for j in range(pargs.num_tasks)
        ]
        self.register_buffer("stacked", torch.stack(rank_one))

    def clear_masks(self):
        """Drop the cached multiplier stack."""
        self.register_buffer("stacked", None)

    def forward(self, x):
        if self.task < 0:
            # Unknown task: blend cached multipliers with (soft or hard) alphas.
            alphas = self.alphas[:self.num_tasks_learned]
            if not pargs.hard_alphas:
                alphas = F.softmax(alphas, dim=0)
            subnet = (alphas * self.stacked[:self.num_tasks_learned]).sum(dim=0)
        else:
            subnet = torch.mm(self.s[self.task],
                              self.t[self.task]).view(*self.weight.shape)
        return F.conv2d(x, self.weight * subnet, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)


class VectorizedBatchEnsembles(nn.Conv2d):
    """BatchEnsemble conv applied as input/output channel scaling: the input
    is scaled by t, convolved with the shared weight, then scaled by s.
    With an unknown task (self.task < 0) each sample in the batch cycles
    through the learned tasks' factors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.s = nn.ParameterList([
            nn.Parameter(module_util.rank_one_init(self).unsqueeze(0))
            for _ in range(pargs.num_tasks)
        ])
        self.t = nn.ParameterList([
            nn.Parameter(module_util.rank_one_initv2(self).unsqueeze(0))
            for _ in range(pargs.num_tasks)
        ])

    def forward(self, x):
        n = x.size(0)
        # Input-side per-channel scaling.
        if self.task >= 0:
            scale_in = self.t[self.task].repeat(n, 1).view(
                n, self.in_channels, 1, 1)
        else:
            rows = [self.t[i % self.num_tasks_learned].flatten()
                    for i in range(n)]
            scale_in = torch.stack(rows, 0).view(n, self.in_channels, 1, 1)
        out = F.conv2d(
            x * scale_in,
            self.weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
        # Output-side per-channel scaling.
        if self.task >= 0:
            scale_out = self.s[self.task].repeat(n, 1).view(
                n, self.out_channels, 1, 1)
        else:
            rows = [self.s[i % self.num_tasks_learned].flatten()
                    for i in range(n)]
            scale_out = torch.stack(rows, 0).view(n, self.out_channels, 1, 1)
        return out * scale_out

    def __repr__(self):
        return f"VectorizedBatchEnsembles({self.in_channels}, {self.out_channels})"


class IndividualHeads(nn.Conv2d):
    """Conv2d keeping a fully separate weight copy per task (no sharing);
    the copies live in ``scores`` for naming parity with the mask layers."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        weight_copies = [
            nn.Parameter(self.weight.data.clone())
            for _ in range(pargs.num_tasks)
        ]
        self.scores = nn.ParameterList(weight_copies)
        # The original shared weight is unused per-task; freeze it if asked.
        if pargs.train_weight_tasks == 0:
            self.weight.requires_grad = False

    def forward(self, x):
        task_weight = self.scores[self.task]
        return F.conv2d(x, task_weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

    def __repr__(self):
        return f"IndividualHeads({self.in_channels}, {self.out_channels})"


class FastHopMaskBN(nn.BatchNorm2d):
    """BatchNorm2d whose (non-affine) channel scale is a per-task binary mask.

    One score vector per task parameterizes the mask. ``cache_masks`` folds
    the learned task masks into a Hopfield-style weight matrix ``W``;
    presumably W is later used to recover a stored mask when the task id is
    unknown — confirm against the training loop.
    NOTE(review): ``self.task`` and ``self.num_tasks_learned`` are attached
    externally before ``forward``/``cache_masks`` are called.
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=False,
        track_running_stats=False,
    ):
        super(FastHopMaskBN, self).__init__(num_features, eps, momentum,
                                            affine, track_running_stats)
        # One mask-score vector per task.
        self.scores = nn.ParameterList([
            nn.Parameter(module_util.bn_mask_initv2(self))
            for _ in range(pargs.num_tasks)
        ])
        self.d = num_features
        # Single shared score, used when the task is unknown (self.task < 0).
        self.register_parameter("score",
                                nn.Parameter(module_util.bn_mask_init(self)))

    def cache_masks(self):
        # Accumulate each learned mask (as a ±1 pattern) into a Hopfield
        # weight matrix with a Storkey-like update (heb - pre - pre^T).
        with torch.no_grad():
            d = self.d
            W = torch.zeros(d, d).to(pargs.device)
            for j in range(self.num_tasks_learned):
                # Map the binary {0,1} mask to a bipolar {-1,+1} pattern.
                x = 2 * module_util.get_subnet_fast(self.scores[j]) - 1
                heb = torch.ger(x, x) - torch.eye(d).to(pargs.device)
                h = W.mm(x.unsqueeze(1)).squeeze()
                pre = torch.ger(x, h)
                W = W + (1.0 / d) * (heb - pre - pre.t())
                # W = W + (1. / d) * heb

            self.register_buffer("W", W)

    def clear_masks(self):
        # Drop the cached Hopfield matrix.
        self.register_buffer("W", None)

    def forward(self, input):
        self._check_input_dim(input)

        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that if gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            #  if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:
                self.num_batches_tracked = self.num_batches_tracked + 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(
                        self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum

        # The binary subnet takes the place of BN's affine `weight` (scale);
        # with affine=False (the default here) self.bias is None.
        if self.task < 0:
            subnet = module_util.GetSubnetFast.apply(self.score)
        else:
            subnet = module_util.GetSubnetFast.apply(self.scores[self.task])
        return F.batch_norm(
            input,
            self.running_mean,
            self.running_var,
            subnet,
            self.bias,
            self.training or not self.track_running_stats,
            exponential_average_factor,
            self.eps,
        )


class PSPRotation(nn.Conv2d):
    """Layer that rotates its input with a task-specific orthogonal "context"
    matrix before applying the shared weight as a matrix multiply
    (parameter-superposition style).

    NOTE(review): forward squeezes the spatial dims, so it effectively
    assumes x is (batch, in_channels, 1, 1) — confirm against callers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One orthogonal context matrix per task, sampled either from scipy's
        # orthogonal-group sampler or via torch's orthogonal initializer.
        if pargs.ortho_group:
            self.contexts = nn.ParameterList([
                nn.Parameter(
                    torch.from_numpy(
                        ortho_group.rvs(self.in_channels).astype("float32")))
                for _ in range(pargs.num_tasks)
            ])
        else:
            self.contexts = nn.ParameterList([
                nn.Parameter(
                    torch.nn.init.orthogonal_(
                        torch.Tensor(self.in_channels, self.in_channels)))
                for _ in range(pargs.num_tasks)
            ])

        # Per-task score matrices (only used by cache_weights).
        self.scores = nn.ParameterList([
            nn.Parameter(module_util.pspinit(self).squeeze())
            for _ in range(pargs.num_tasks)
        ])

    def cache_weights(self, t):
        # Superimpose the first t task weights: sum_j scores[j] @ contexts[j].
        out = torch.stack(
            [self.scores[j].mm(self.contexts[j]) for j in range(t)]).sum(dim=0)
        self.register_buffer("weight_sum", out)

    def cache_masks(self):
        # Buffer all context matrices stacked along dim 0.
        self.register_buffer(
            "stacked",
            torch.stack([self.contexts[j] for j in range(pargs.num_tasks)]),
        )

    def clear_masks(self):
        self.register_buffer("stacked", None)

    def forward(self, x):

        # Collapse spatial dims and work in (in_channels, batch) layout.
        x = x.squeeze().t()
        if self.task < 0:
            # Unknown task: blend cached contexts using the alphas.
            # NOTE(review): this re-views self.alphas on every call — confirm
            # the training loop supplies alphas with a matching shape.
            self.alphas = self.alphas.view(self.alphas.size(0), 1, 1)
            if not pargs.hard_alphas:
                alpha_weights = F.softmax(self.alphas[:self.num_tasks_learned],
                                          dim=0)
                ctx = (alpha_weights *
                       self.stacked[:self.num_tasks_learned]).sum(dim=0)
            else:
                # Hard alphas: keep only contexts with a positive weight.
                alpha_weights = self.alphas[:self.num_tasks_learned]
                idxs = (alpha_weights > 0).squeeze()
                ctx = (alpha_weights[idxs] *
                       self.stacked[:self.num_tasks_learned][idxs]).sum(dim=0)
            out = ctx.mm(x)
        else:
            out = self.contexts[self.task].mm(x)
        out = self.weight.squeeze().mm(out)
        out = out.t()
        # Restore trailing singleton spatial dims: (batch, out_channels, 1, 1).
        out = out.view(*out.size(), 1, 1)
        return out


class StackedFastMultitaskMaskConv(nn.Conv2d):
    """Like FastMultitaskMaskConv, but rebuilds the mask stack on the fly in
    forward instead of relying on a cached buffer."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        per_task_scores = [
            nn.Parameter(module_util.mask_init(self))
            for _ in range(pargs.num_tasks)
        ]
        self.scores = nn.ParameterList(per_task_scores)
        # Shared weights stay frozen unless weight training is enabled.
        if pargs.train_weight_tasks == 0:
            self.weight.requires_grad = False

    def forward(self, x):
        if self.task < 0:
            # Unknown task: stack the learned masks and blend with alphas.
            n = min(pargs.num_tasks, self.num_tasks_learned)
            masks = torch.stack([
                module_util.get_subnet_fast(self.scores[j]) for j in range(n)
            ])
            subnet = (self.alphas[:self.num_tasks_learned] * masks).sum(dim=0)
        else:
            subnet = module_util.GetSubnetFast.apply(self.scores[self.task])
        return F.conv2d(x, self.weight * subnet, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

# The following modules are from
# 'Dynamic Sparse Training: Find Efficient Sparse Network From Scratch With Trainable Masked Layers'

class BinaryStep(torch.autograd.Function):
    """Heaviside step with a piecewise surrogate gradient (Dynamic Sparse
    Training, Liu et al.).

    Forward: 1 where input > 0, else 0. (Earlier experiments used >= instead
    of > — translated from the original Chinese note.)
    Backward: the step's zero-almost-everywhere gradient is replaced by
        2 - 4|x|  for |x| <= 0.4,
        0.4       for 0.4 < |x| <= 1,
        0         for |x| > 1.
    """

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return (input > 0.).float()

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        mag = torch.abs(input)
        # Piecewise surrogate derivative; torch.where instead of in-place
        # masked writes, and no needless grad_output.clone() (the multiply
        # below already allocates a fresh tensor).
        surrogate = 2 - 4 * mag
        surrogate = torch.where(mag > 0.4, torch.full_like(mag, 0.4), surrogate)
        surrogate = torch.where(mag > 1, torch.zeros_like(mag), surrogate)
        return grad_output * surrogate

class MaskedConv2d(nn.Module):
    """Conv2d with trainable pruning thresholds (Dynamic Sparse Training).

    A weight is kept when |w| exceeds its output channel's learned threshold;
    BinaryStep supplies a surrogate gradient so the thresholds are trainable.
    The duplicated mask-construction code of the reference implementation is
    factored into ``_compute_mask``.
    """

    def __init__(self,
                 in_c,
                 out_c,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        super(MaskedConv2d, self).__init__()
        self.in_channels = in_c
        self.out_channels = out_c
        kernel_size = (kernel_size, kernel_size)  # square kernels only (code adaptation)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups

        # Shared convolution weight and optional bias.
        self.weight = nn.Parameter(
            torch.Tensor(out_c, in_c // groups, *kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_c))
        else:
            self.register_parameter('bias', None)
        # One trainable pruning threshold per output channel.
        self.threshold = nn.Parameter(torch.Tensor(out_c))
        self.unit_step = BinaryStep.apply
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init the weight, uniform-init the bias, zero thresholds."""
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)
        with torch.no_grad():
            self.threshold.data.fill_(0.)

    def _compute_mask(self):
        """Binary keep-mask: 1 where |weight| > its channel's threshold."""
        out_c = self.weight.shape[0]
        flat_abs = torch.abs(self.weight).view(out_c, -1)
        mask = self.unit_step(flat_abs - self.threshold.view(out_c, -1))
        return mask.view(self.weight.shape)

    def forward(self, x):
        mask = self._compute_mask()
        # Safety net from the DST reference code: if (almost) everything got
        # pruned, reset the thresholds and rebuild the mask.
        if torch.sum(mask) / mask.numel() <= 0.01:
            with torch.no_grad():
                self.threshold.data.fill_(0.)
            mask = self._compute_mask()
        masked_weight = self.weight * mask

        return torch.nn.functional.conv2d(x,
                                          masked_weight,
                                          bias=self.bias,
                                          stride=self.stride,
                                          padding=self.padding,
                                          dilation=self.dilation,
                                          groups=self.groups)

# The following module is proposed in this work.
class MaskedConv2dMW_1(nn.Module):
    """Multi-task masked conv over a single shared weight (proposed module).

    Behavior depends on ``pruning_method``:
      * 'topK'      — supsup-style mask keeping the top ``sparsity`` fraction
                      of |scores| for the current task (pargs.cur_task_id).
      * 'threshold' — DST-style mask: BinaryStep(scores) keeps weights whose
                      score is positive.
    NOTE(review): forward implicitly returns None for any other
    pruning_method value — confirm this is never hit.
    """

    def __init__(self,
                 in_c,
                 out_c,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 num_tasks = pargs.num_tasks,
                 sparsity = pargs.sparsity,
                 pruning_method = pargs.pruning_method,
                 init_method = pargs.init_method):
        super(MaskedConv2dMW_1, self).__init__()
        self.in_channels = in_c
        self.out_channels = out_c
        kernel_size = (kernel_size, kernel_size)  # square kernels only (code adaptation)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.num_tasks = num_tasks
        self.cur_task_id = None
        self.sparsity = sparsity # newly added parameter
        self.pruning_method = pruning_method # newly added parameter
        self.init_method = init_method


        # define weight
        self.weight = nn.Parameter(
            torch.Tensor(out_c, in_c // groups, *kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_c))
        else:
            self.register_parameter('bias', None)
        self.scores = nn.ParameterList([])
        for _ in range(num_tasks):
            self.scores.append(
                nn.Parameter(torch.Tensor(out_c, in_c // groups,
                                          *kernel_size))) # TODO(review): is this guaranteed correct? It should match supsup, modules.py line 65
        # Kaiming initialization of the absolute values (see init_parameters)

        # self.threshold_scores = nn.ParameterList([])

        self.unit_step = BinaryStep.apply
        self.init_parameters()

    def init_parameters(self):
        """Initialize weight/bias and per-task scores per ``init_method``.

        When pruning_method == 'threshold', the score parameters are replaced
        by their absolute values so BinaryStep starts from non-negative scores.
        """
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        threshold_scores = nn.ParameterList([])
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)
        for task_id in range(self.num_tasks):
            if self.init_method == 'kaiming':
                nn.init.kaiming_uniform_(self.scores[task_id], a=math.sqrt(5))

                nn.init.constant_(self.scores[task_id], torch.mean(torch.abs(self.scores[task_id])).item()) # fixed-value init (mean |score|) — needs documenting
                if self.pruning_method == 'threshold':
                    threshold_scores.append(nn.Parameter(torch.abs(self.scores[task_id])))

            elif self.init_method == 'lr_constant':
                nn.init.constant_(self.scores[task_id], 100)
                if self.pruning_method == 'threshold':
                    threshold_scores.append(nn.Parameter(torch.abs(self.scores[task_id])))
        if self.pruning_method == 'threshold':
            self.scores = threshold_scores
            del threshold_scores
            # self.threshold_scores.append(nn.Parameter(torch.abs(self.scores[task_id]))) # controls whether to apply Kaiming init on absolute values
            # self.threshold_scores.append(nn.Parameter(self.scores[task_id]))
    def forward(self, x):
        if self.pruning_method == 'topK':

            # When using the "testmask" method:
            # mask = torch.stack(
            #     [
            #         module_util.get_subnet(self.scores[j].abs(), self.sparsity) # take |scores| for each task's score set
            #         for j in range(pargs.num_tasks)
            #     ]
            # )
            # # This should be the other supsup module; re-test with this block
            # # commented out to check whether results match the uncommented run
            # if self.task < 0:
            #     alpha_weights = self.alphas[: self.num_tasks_learned]
            #     idxs = (alpha_weights > 0).squeeze().view(self.num_tasks_learned)  # one alpha per task
            #     if len(idxs.shape) == 0:
            #         idxs = idxs.view(1)
            #     subnet = (
            #             alpha_weights[idxs]
            #             * mask[: self.num_tasks_learned][idxs]
            #     ).sum(dim=0)
            # else:
            #     subnet = module_util.GetSubnet.apply(
            #         self.scores[self.task].abs(), self.sparsity
            #     )

            # When using the "wm" method:
            subnet = module_util.GetSubnet.apply(
                self.scores[pargs.cur_task_id].abs(), self.sparsity
            )
            w = self.weight * subnet
            # print(self.scores[pargs.cur_task_id].numel())
            # print(self.weight.numel())
            # print(subnet.numel())
            conv_out = F.conv2d(
                x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
            )
            return conv_out

        if self.pruning_method == 'threshold':
            # print(self.scores[pargs.cur_task_id])
            if pargs.cur_task_id is None:
                raise RuntimeError(
                    'MUST set current task_id before activate the model')
            mask = self.unit_step(self.scores[pargs.cur_task_id])
            # ratio = torch.sum(mask) / mask.numel()
            # logger.info("keep ratio {:.2f}".format(ratio))
            # if ratio <= 0.01:
            #     masked_weight = self.weight
            # else:
            masked_weight = mask * self.weight
            conv_out = torch.nn.functional.conv2d(x,
                                                  masked_weight,
                                                  bias=self.bias,
                                                  stride=self.stride,
                                                  padding=self.padding,
                                                  dilation=self.dilation,
                                                  groups=self.groups)
            return conv_out


    # def set_current_task(self, task_id):
    #     self.cur_task_id = task_id


class OutMaskedLinear(nn.Module):
    """Output linear layer with a per-task BinaryStep mask on shared weights.

    The mask is skipped entirely when it would keep <= 1% of the weights.
    """

    def __init__(self, in_size, out_size, num_tasks=pargs.num_tasks):
        super(OutMaskedLinear, self).__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.num_tasks = num_tasks
        self.weight = nn.Parameter(torch.Tensor(out_size, in_size))
        self.bias = nn.Parameter(torch.Tensor(out_size))
        score_list = [nn.Parameter(torch.Tensor(out_size, in_size))
                      for _ in range(num_tasks)]
        self.scores = nn.ParameterList(score_list)
        self.unit_step = BinaryStep.apply
        self.pruning_method = 'threshold'
        self.init_parameters()

    def init_parameters(self):
        """Kaiming-init weight and scores; scores then become |scores|."""
        reinit_scores = nn.ParameterList([])

        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)
        for task_id in range(self.num_tasks):
            nn.init.kaiming_uniform_(self.scores[task_id], a=math.sqrt(5))
            if self.pruning_method == 'threshold':
                reinit_scores.append(
                    nn.Parameter(torch.abs(self.scores[task_id])))
        if self.pruning_method == 'threshold':
            self.scores = reinit_scores
            del reinit_scores

    def forward(self, input, task_id=0):
        mask = self.unit_step(self.scores[task_id])
        keep_ratio = torch.sum(mask) / mask.numel()
        # Fall back to the dense weight if nearly everything is pruned.
        if keep_ratio <= 0.01:
            effective_weight = self.weight
        else:
            effective_weight = mask * self.weight
        return torch.nn.functional.linear(input, effective_weight, self.bias)

    def get_mask(self, task_id):
        """Return {param_name: binary mask} for every score parameter."""
        masks = {}
        for name, param in self.named_parameters():
            if name.find('score') != -1:
                masks[name] = self.unit_step(param[task_id])
        return masks


class OutLinear(nn.Module):
    """Plain output linear layer; accepts a task_id for interface parity with
    OutMaskedLinear but applies no masking in forward."""

    def __init__(self, in_size, out_size, num_tasks=pargs.num_tasks):
        super(OutLinear, self).__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.num_tasks = num_tasks
        self.weight = nn.Parameter(torch.Tensor(out_size, in_size))
        self.bias = nn.Parameter(torch.Tensor(out_size))
        self.unit_step = BinaryStep.apply
        self.init_parameters()

    def init_parameters(self):
        """Kaiming-init the weight, uniform-init the bias."""
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input, task_id=0):
        # task_id is accepted but unused — the dense weight is always applied.
        return torch.nn.functional.linear(input, self.weight, self.bias)

    def get_mask(self, task_id):
        """Return {param_name: binary mask} for any score parameters
        (none exist on this layer, so this normally returns {})."""
        masks = {}
        for name, param in self.named_parameters():
            if 'score' in name:
                masks[name] = self.unit_step(param[task_id])
        return masks