import torch


class MPath:
    """
    Neuron Sensitivity metric (MPath).

    Registers forward hooks on the sub-modules of ``model[1]`` — the model is
    presumably ``torch.nn.Sequential(normalize_layer, classifier)`` (see the
    inline comment in ``evaluate``; TODO confirm with callers) — then runs one
    adversarial and one clean batch through the model and measures, per hooked
    module, the batch-averaged L1 distance between the two activation maps
    (normalized by the size of dim 1).  ``evaluate`` returns the fraction of
    hooked modules whose sensitivity exceeds a threshold.
    """

    def __init__(self, model, device, register_layers=4, **kwargs):
        """
        Args:
            model: wrapped network; ``evaluate`` indexes ``model[1]``, so it
                must be subscriptable (e.g. an ``nn.Sequential``).
            device: torch device the inputs are moved to before the forward.
            register_layers: maximum module-name depth (number of dot-separated
                components) at which forward hooks are registered.
            **kwargs: accepted for interface compatibility; ignored.
        """
        self.model = model
        self.device = device
        self.register_layers = register_layers

    def clear_hooks(self):
        """Remove every registered forward hook and drop the handle list."""
        for handle in self.hook_handles:
            handle.remove()
        del self.hook_handles

    def forward_hook(self, module, input, output):
        """Record ``(module.name, activation)`` for tensor-valued outputs.

        Non-tensor outputs (tuples, dicts, None) are skipped so that only
        comparable activations enter ``self.features``.
        """
        # isinstance (not `type(...) ==`) so Tensor subclasses are captured too.
        if isinstance(output, torch.Tensor):
            self.features.append((module.name, output.cpu()))

    def evaluate(
        self,
        adv_xs=None,
        cln_xs=None,
        cln_ys=None,
        adv_ys=None,
        target_preds=None,
        target_flag=False,
        thres_hold=0.5,
    ):
        """Compute the fraction of modules sensitive to the adversarial batch.

        Args:
            adv_xs: adversarial inputs, shape ``(batch, ...)``.
            cln_xs: clean inputs, same shape as ``adv_xs``.
            cln_ys, adv_ys, target_preds, target_flag: unused here; kept for
                interface compatibility with sibling evaluators.
            thres_hold: sensitivity threshold above which a module counts.

        Returns:
            float in [0, 1]: (#modules with sensitivity > thres_hold) / total,
            or 0.0 if no hooked module produced a tensor output.
        """
        self.model.eval()

        nsense = {}
        self.register_name = []
        self.features = []
        self.hook_handles = []

        # model is assumed to be torch.nn.Sequential(normalize_layer, model);
        # hooks go on the real network, skipping the normalization layer.
        for name, module in self.model[1].named_modules():
            # Depth filter: only hook modules up to `register_layers` levels deep.
            if len(name.split('.')) <= self.register_layers:
                self.register_name.append(name)
                self.hook_handles.append(
                    module.register_forward_hook(self.forward_hook)
                )
                # Stash the qualified name on the module so the hook can report it.
                module.name = name

        # try/finally so hooks are removed even if a forward pass raises;
        # otherwise stale hooks would keep mutating self.features on every
        # later use of the model.
        try:
            with torch.no_grad():
                self.features = []
                # First half of self.features comes from the adversarial pass,
                # second half from the clean pass (hooks fire in the same order).
                self.model(adv_xs.to(self.device))
                self.model(cln_xs.to(self.device))
                batch_size = adv_xs.shape[0]

                num_features = len(self.features) // 2

                for i in range(num_features):
                    # Sanity check: the i-th adv activation and the i-th clean
                    # activation must come from the same module.
                    assert self.features[i][0] == self.features[i + num_features][0]
                    module_name = self.features[i][0]
                    # L1 distance between activations, normalized by dim-1 size
                    # (channels/features — assumes outputs are at least 2-D).
                    diff = (
                        torch.norm(
                            self.features[i][1] - self.features[i + num_features][1],
                            p=1,
                        )
                        / self.features[i][1].shape[1]
                    )
                    # A module called more than once per forward (e.g. a shared
                    # activation) accumulates over its occurrences.
                    if module_name in nsense:
                        nsense[module_name] += diff
                    else:
                        nsense[module_name] = diff

                for key in nsense:
                    nsense[key] /= batch_size

            ret = 0
            total = 0
            for key in nsense:
                if nsense[key] > thres_hold:
                    ret += 1
                total += 1
        finally:
            self.clear_hooks()

        # Guard: no tensor-producing module was hooked (would divide by zero).
        if total == 0:
            return 0.0
        return ret / total
