import torch


class NSense:
    """
    Neuron Sensitivity metric.

    Registers forward hooks on sub-modules of the wrapped model, runs an
    adversarial batch and a clean batch through it, and reports the average
    (over hooked modules) L1 distance between the two activation sets,
    normalized by each activation's second dimension and by the batch size.
    """

    def __init__(self, model, device, register_layers=4, **kwargs):
        """
        Args:
            model: subscriptable wrapper, e.g.
                ``torch.nn.Sequential(normalize_layer, net)``; hooks are
                placed on the sub-modules of ``model[1]``.
            device: torch device inputs are moved to before the forward pass.
            register_layers: maximum dotted-name depth (number of
                '.'-separated components) at which hooks are registered.
            **kwargs: ignored; accepted for interface compatibility.
        """
        self.model = model
        self.device = device
        self.register_layers = register_layers

    def clear_hooks(self):
        """Remove every registered forward hook and drop the handle list."""
        for handle in self.hook_handles:
            handle.remove()
        del self.hook_handles

    def forward_hook(self, module, input, output):
        """Record (module name, activation on CPU) for tensor outputs."""
        # Container / custom modules may return tuples or None; only plain
        # tensor outputs are comparable between the two forward passes.
        if isinstance(output, torch.Tensor):
            self.features.append((module.name, output.cpu()))

    def evaluate(self, adv_xs=None, cln_xs=None, cln_ys=None, adv_ys=None, target_preds=None, target_flag=False):
        """
        Compute the neuron-sensitivity score for one (adv, clean) batch pair.

        Args:
            adv_xs: adversarial inputs, shape (batch, ...).
            cln_xs: clean inputs, same shape as ``adv_xs``.
            cln_ys, adv_ys, target_preds, target_flag: unused; kept so the
                signature matches the common evaluator interface.

        Returns:
            Scalar tensor: mean over hooked modules of
            ``||f(adv) - f(cln)||_1 / f.shape[1] / batch_size``.

        Raises:
            ZeroDivisionError: if no hooked module produced a tensor output.
        """
        self.model.eval()

        nsense = {}

        # Register hooks on every sub-module of model[1] whose dotted name is
        # at most `register_layers` components deep (the root '' has depth 1).
        self.register_name = []
        self.features = []
        self.hook_handles = []
        for name, module in self.model[1].named_modules():  # torch.nn.Sequential(normalize_layer, model)
            if len(name.split('.')) <= self.register_layers:
                self.register_name.append(name)
                self.hook_handles.append(module.register_forward_hook(self.forward_hook))
                # Stash the dotted name on the module so the hook can report it.
                module.name = name

        with torch.no_grad():
            self.features = []
            # First pass fills features[:num_features] with adversarial
            # activations; the second appends the clean ones in the same order.
            self.model(adv_xs.to(self.device))
            self.model(cln_xs.to(self.device))
            batch_size = adv_xs.shape[0]

            num_features = len(self.features) // 2

            for i in range(num_features):
                # Hooks fire in the same order for both forward passes.
                assert self.features[i][0] == self.features[i + num_features][0]
                module_name = self.features[i][0]
                # L1 distance between adversarial and clean activations,
                # normalized by the activation's second dimension.
                diff = torch.norm(self.features[i][1] - self.features[i + num_features][1], p=1) / \
                       self.features[i][1].shape[1]
                if module_name not in nsense:
                    nsense[module_name] = diff
                else:
                    # A module invoked more than once per forward contributes
                    # one term per invocation.
                    nsense[module_name] += diff

            for key in nsense:
                nsense[key] /= batch_size

        self.clear_hooks()
        # Average the per-module scores.
        return sum(nsense.values()) / len(nsense)
