# Copyright (c) OpenMMLab. All rights reserved.
from tools.CKA.func_cka import cka_similarity
from ..builder import CLASSIFIERS, build_backbone, build_head, build_neck
from ..heads import MultiLabelClsHead
from ..utils.augment import Augments
from .base import BaseClassifier
import torch

from globals import sets



def cka_loss(features1, features2):
    """Compute the linear CKA (Centered Kernel Alignment) similarity
    between two feature matrices.

    The closer the value is to 1, the more similar the two
    representations are. CKA is invariant to isotropic scaling and
    orthogonal transformations of the features.

    Args:
        features1 (Tensor): shape ``[batch_size, feature_dim1]``.
        features2 (Tensor): shape ``[batch_size, feature_dim2]``.
            The two feature dims may differ; only the batch sizes
            must match.

    Returns:
        Tensor: scalar CKA similarity in ``[0, 1]``.
    """
    # Work in float to avoid integer / half-precision issues.
    features1 = features1.float()
    features2 = features2.float()

    def center_kernel(K):
        """Double-center a kernel matrix: H @ K @ H with H = I - 11^T/n."""
        n = K.size(0)
        eye = torch.eye(n, device=K.device)
        H = eye - (1.0 / n) * torch.ones(n, n, device=K.device)
        return H @ K @ H  # K_centered = H*K*H

    # Linear kernels (Gram matrices), both [batch_size, batch_size].
    K = features1 @ features1.t()
    L = features2 @ features2.t()

    K_centered = center_kernel(K)
    L_centered = center_kernel(L)

    # HSIC(K, L) = trace(Kc @ Lc). Both centered Gram matrices are
    # symmetric, so trace(Kc @ Lc) == (Kc * Lc).sum(), which avoids the
    # O(n^3) matrix product while producing the same value.
    hsic = (K_centered * L_centered).sum()

    # Normalization factors sqrt(HSIC(K, K)) and sqrt(HSIC(L, L)).
    norm_K = (K_centered * K_centered).sum().sqrt()
    norm_L = (L_centered * L_centered).sum().sqrt()

    # CKA = HSIC(K, L) / (sqrt(HSIC(K, K)) * sqrt(HSIC(L, L)));
    # the epsilon guards against division by zero for constant features.
    cka = hsic / (norm_K * norm_L + 1e-8)

    return cka




@CLASSIFIERS.register_module()
class ImageClassifier(BaseClassifier):
    """Standard image classifier: backbone -> (optional) neck -> head.

    Args:
        backbone (dict): Config used to build the feature backbone.
        neck (dict, optional): Config used to build the neck.
            Defaults to None (no neck).
        head (dict, optional): Config used to build the classification
            head. Defaults to None (no head).
        pretrained (str, optional): Checkpoint path; when given it
            overrides ``init_cfg`` with a ``Pretrained`` init config.
        train_cfg (dict, optional): Training config; an ``augments``
            entry, if present, is used to build batch augmentations
            (e.g. mixup/cutmix-style -- TODO confirm which are used).
        init_cfg (dict, optional): Initialization config forwarded to
            :class:`BaseClassifier`.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 train_cfg=None,
                 init_cfg=None):
        super(ImageClassifier, self).__init__(init_cfg)

        # A ``pretrained`` path takes precedence over any ``init_cfg``.
        if pretrained is not None:
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        self.backbone = build_backbone(backbone)

        if neck is not None:
            self.neck = build_neck(neck)

        if head is not None:
            self.head = build_head(head)

        # Batch-level augmentations are only built when configured.
        self.augments = None
        if train_cfg is not None:
            augments_cfg = train_cfg.get('augments', None)
            if augments_cfg is not None:
                self.augments = Augments(augments_cfg)

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmclassification/tools/analysis_tools/get_flops.py`
        """
        return self.extract_feat(img, stage='pre_logits')

    def extract_feat(self, img, output_attentions=False, stage='neck'):
        """Directly extract features from the specified stage.

        Args:
            img (Tensor): The input images. The shape of it should be
                ``(num_samples, num_channels, *img_shape)``.
            output_attentions (bool): If True, the backbone call is
                unpacked as ``(features, attention_weights)`` and the
                attention weights are returned alongside the features
                for the "neck" stage. Defaults to False.
            stage (str): Which stage to output the feature. Choose from
                "backbone", "neck" and "pre_logits". Defaults to "neck".

        Returns:
            tuple | Tensor: The output of specified stage.
                The output depends on detailed implementation. In general, the
                output of backbone and neck is a tuple and the output of
                pre_logits is a tensor.

        Examples:
            1. Backbone output

            >>> import torch
            >>> from mmcv import Config
            >>> from mmcls.models import build_classifier
            >>>
            >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model
            >>> cfg.backbone.out_indices = (0, 1, 2, 3)  # Output multi-scale feature maps
            >>> model = build_classifier(cfg)
            >>> outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='backbone')
            >>> for out in outs:
            ...     print(out.shape)
            torch.Size([1, 64, 56, 56])
            torch.Size([1, 128, 28, 28])
            torch.Size([1, 256, 14, 14])
            torch.Size([1, 512, 7, 7])

            2. Neck output

            >>> import torch
            >>> from mmcv import Config
            >>> from mmcls.models import build_classifier
            >>>
            >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model
            >>> cfg.backbone.out_indices = (0, 1, 2, 3)  # Output multi-scale feature maps
            >>> model = build_classifier(cfg)
            >>>
            >>> outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='neck')
            >>> for out in outs:
            ...     print(out.shape)
            torch.Size([1, 64])
            torch.Size([1, 128])
            torch.Size([1, 256])
            torch.Size([1, 512])

            3. Pre-logits output (without the final linear classifier head)

            >>> import torch
            >>> from mmcv import Config
            >>> from mmcls.models import build_classifier
            >>>
            >>> cfg = Config.fromfile('configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py').model
            >>> model = build_classifier(cfg)
            >>>
            >>> out = model.extract_feat(torch.rand(1, 3, 224, 224), stage='pre_logits')
            >>> print(out.shape)  # The hidden dims in head is 3072
            torch.Size([1, 3072])
        """  # noqa: E501
        assert stage in ['backbone', 'neck', 'pre_logits'], \
            (f'Invalid output stage "{stage}", please choose from "backbone", '
             '"neck" and "pre_logits"')
        if output_attentions:
            # Assumes the backbone returns (features, attention_weights)
            # when called this way -- TODO confirm the configured backbone
            # actually yields a 2-tuple, since no flag is passed to it.
            x, attention_weights = self.backbone(img)
        else:
            x = self.backbone(img)
        if stage == 'backbone':
            # NOTE(review): attention weights (if requested) are dropped
            # for the "backbone" stage -- confirm this is intentional.
            return x

        if self.with_neck:
            x = self.neck(x)
        if stage == 'neck':
            # Only the "neck" stage propagates attention weights.
            if output_attentions:
                return x, attention_weights
            else:
                return x

        # "pre_logits" stage: run the head's pre-logits projection when
        # the head provides one; otherwise return the neck output as-is.
        if self.with_head and hasattr(self.head, 'pre_logits'):
            x = self.head.pre_logits(x)

        return x

    def forward_train(self, img, gt_label, **kwargs):
        """Forward computation during training.

        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            gt_label (Tensor): It should be of shape (N, 1) encoding the
                ground-truth label of input images for single label  . It
                should be of shape (N, C) encoding the ground-truth label
                of input images for multi-labels task.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # Batch augmentations may transform both images and labels.
        if self.augments is not None:
            img, gt_label = self.augments(img, gt_label)

        x = self.extract_feat(img)
        
        losses = dict()
        loss = self.head.forward_train(x, gt_label)

        losses.update(loss)

        return losses




    def simple_test(self, img, img_metas=None, **kwargs):
        """Test without augmentation."""
        x = self.extract_feat(img)

        if isinstance(self.head, MultiLabelClsHead):
            # Multi-label heads produce independent per-class scores, so
            # softmax post-processing would be incorrect.
            assert 'softmax' not in kwargs, (
                'Please use `sigmoid` instead of `softmax` '
                'in multi-label tasks.')
        res = self.head.simple_test(x, **kwargs)

        return res

@CLASSIFIERS.register_module()
class TNSE_ImageClassifier(ImageClassifier):
    """Classifier variant whose ``forward_train`` returns the final
    extracted feature (e.g. for t-SNE style analysis) instead of a
    loss dictionary.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 train_cfg=None,
                 init_cfg=None):
        super().__init__(backbone,
                         neck=neck,
                         head=head,
                         pretrained=pretrained,
                         train_cfg=train_cfg,
                         init_cfg=init_cfg)

    def forward_train(self, img, gt_label=None, **kwargs):
        """Return the last-stage feature of ``img``; no loss is computed."""
        if self.augments is not None:
            img, gt_label = self.augments(img, gt_label)

        feats = self.extract_feat(img)
        return feats[-1]


@CLASSIFIERS.register_module()
class Sep_ImageClassifier(ImageClassifier):
    """Classifier that combines the classification loss with a CKA
    similarity loss against a second reference model (``sets.model2``).

    Args:
        _lambda (int): weighting knob; the classification loss is scaled
            by ``_lambda / 10`` and the similarity loss by the
            complement ``1 - _lambda / 10``. Defaults to 10
            (classification loss only).
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 train_cfg=None,
                 init_cfg=None,
                 _lambda=10,
                 ):
        super().__init__(
            backbone,
            neck=neck,
            head=head,
            pretrained=pretrained,
            train_cfg=train_cfg,
            init_cfg=init_cfg)
        # Stored as a fraction in [0, 1]; e.g. _lambda=10 -> 1.0.
        self._lambda = _lambda / 10

    def _cka_similarity(self, feat1, img):
        """CKA similarity between ``feat1`` and the feature that the
        reference model ``sets.model2`` extracts from ``img``.

        Backbone/neck outputs are commonly tuples; only the first entry
        is compared in that case.
        """
        feat2 = sets.model2.extract_feat(img)
        if isinstance(feat1, tuple):
            feat1 = feat1[0]
        if isinstance(feat2, tuple):
            feat2 = feat2[0]
        sim = cka_loss(feat1, feat2)
        return sim

    def forward_train(self, img, gt_label=None, output_attentions=False, **kwargs):
        """Training forward.

        With ``gt_label`` given, returns a dict of losses: the head loss
        scaled by ``self._lambda``, plus a ``sim_loss`` term when a
        reference model is configured. Without ``gt_label``, returns the
        final-stage feature (and attention weights when
        ``output_attentions`` is True).
        """
        if self.augments is not None:
            img, gt_label = self.augments(img, gt_label)

        if output_attentions:
            x, attention_weights = self.extract_feat(img, output_attentions)
        else:
            x = self.extract_feat(img, output_attentions)

        # Inference-style call: no label, so return features directly.
        # Use ``is None`` -- ``gt_label == None`` on a tensor triggers
        # elementwise comparison instead of an identity check.
        if gt_label is None:
            if output_attentions:
                return x[-1], attention_weights
            return x[-1]

        losses = dict()
        loss = self.head.forward_train(x, gt_label)
        loss['loss'] = self._lambda * loss['loss']
        if sets.model2:
            sim_loss = self._cka_similarity(x, img)
            # (1 - sim) so that higher similarity lowers the loss.
            loss['sim_loss'] = (1 - self._lambda) * (1 - sim_loss)

        losses.update(loss)

        return losses







# @CLASSIFIERS.register_module()
# class Sep_ImageClassifier2(ImageClassifier):
#     def __init__(self,
#                  backbone,
#                  neck=None,
#                  head=None,
#                  pretrained=None,
#                  train_cfg=None,
#                  init_cfg=None):
#         super().__init__(
#                  backbone,
#                  neck = neck,
#                  head = head,
#                  pretrained = pretrained,
#                  train_cfg = train_cfg,
#                  init_cfg = init_cfg)




#     def forward_train(self, img, gt_label=None, output_attentions=False, **kwargs):
#         if self.augments is not None:
#             img, gt_label = self.augments(img, gt_label)

#         if output_attentions:
#             x,attention_weights = self.extract_feat(img, output_attentions)
#         else:
#             x = self.extract_feat(img, output_attentions)
#         if gt_label==None:
#             if output_attentions:
#                 return x[-1], attention_weights
#             return x[-1]
        
#         losses = dict()
#         loss = self.head.forward_train(x, gt_label)


#         losses.update(loss)

#         return losses


#     def compute_feature_separation(self, x, labels):
#         '''
#             x.shape:(4,768)
#             labels.shape:(4,19)
#         '''
#         layers = len(x)-1

#         seps = 0
#         for layer in range(layers):
#             features = x[layer]
#             num_classes = labels.shape[-1]


#             features_list = []
#             features_class_centered_list = []

#             class_means_list = []

#             for k in range(num_classes):
#                 class_features = features[labels[:,k]==1]
#                 if class_features.shape[0]>0:
#                     features_list.append(class_features)
#                     class_mean = class_features.mean(dim=0, keepdim=True)
#                     features_class_centered = class_features - class_mean
#                     class_means_list.append(class_mean)
#                     features_class_centered_list.append(features_class_centered)
#             aug_features = torch.cat(features_list, dim=0)  #(614,768)
#             class_means = torch.cat(class_means_list, dim=0)
#             global_mean = features.mean(dim=0,keepdim=True)   #(1,768)
#             aug_features_class_centered = torch.cat(features_class_centered_list, dim=0) #(614,768)
#             class_means_global_centered = class_means - global_mean #(19,768)

#             sigma_W = (aug_features_class_centered.T @ aug_features_class_centered)/aug_features.shape[0] #(614,614)
#             sigma_B = (class_means_global_centered.T @ class_means_global_centered)/class_means.shape[0] #(19,19)

#             if torch.trace(sigma_B) == 0:
#                 sep_measure = torch.trace(sigma_W)*10
#             else:
#                 sep_measure = torch.trace(sigma_W) / torch.trace(sigma_B) # lower the better
#             # sep_measure = torch.trace(sigma_W)*50

#             seps += sep_measure
#         return seps
