# ---------------------------------------------------------------------------------------------------
# CLIP-DINOiser
# authors: Monika Wysoczanska, Warsaw University of Technology & Oriane Simeoni, valeo.ai
# ---------------------------------------------------------------------------------------------------

import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from mmseg.ops import resize
from omegaconf import OmegaConf

from models.builder import MODELS, build_model
# from models.FOUND.model import get_vit_encoder
import copy
from typing import Dict, Union
# ImageNet mean/std normalization applied to images before they are fed to the
# DINO / FOUND teacher networks (see DinoCLIPCustom.load_teachers / get_dino_features).
NORMALIZE = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))


import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
import warnings
from .dino import vision_transformers as vits


# segment_anything is an optional dependency (only needed when a SAM teacher is
# configured).  Bind *every* imported name on failure so later `is None` checks
# and any future use of the predictor helpers raise a clear error instead of a
# NameError when the package is missing.
try:
    from segment_anything import sam_model_registry, SamPredictor
    from segment_anything.utils.transforms import ResizeLongestSide
except ImportError:
    sam_model_registry = None
    SamPredictor = None
    ResizeLongestSide = None

def get_vit_encoder(vit_arch, vit_model, vit_patch_size, enc_type_feats):
    """
    Build a frozen ViT teacher encoder (DINO or DINOv2) and register a qkv hook.

    Args:
        vit_arch (str): architecture name: "vit_small", "vit_base",
            "vit_large" or "vit_giant" (the last two only for DINOv2).
        vit_model (str): teacher family: "dino", "dinov2" or "dinov2_reg"
            ("dinov2_reg" loads the variant with register tokens).
        vit_patch_size (int): ViT patch size (16/8 for DINO, 14 for DINOv2).
        enc_type_feats (str): which features will later be read back through
            the hook ("k", "q", "v", "qkv", "mlp").

    Returns:
        tuple: ``(vit_encoder, initial_dim, hook_features)`` where
            ``vit_encoder`` is the frozen encoder in eval mode on CUDA,
            ``initial_dim`` is its embedding dimension, and ``hook_features``
            is the dict a forward hook fills with the raw qkv output of the
            last attention block (key ``"qkv"``).  For DINOv2 it also carries
            ``"num_register_tokens"`` (4 with registers, else 0).

    Raises:
        ValueError: for an unsupported ``vit_model`` or an unsupported
            (arch, patch_size) combination.  The original code silently fell
            through and crashed with a NameError at the return statement.
    """
    hook_features = {}

    if vit_model == "dino":
        # ---- map (arch, patch) to the official DINO checkpoint + embed dim ----
        if vit_arch == "vit_small" and vit_patch_size == 16:
            url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
            initial_dim = 384
        elif vit_arch == "vit_small" and vit_patch_size == 8:
            url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
            initial_dim = 384
        elif vit_arch == "vit_base" and vit_patch_size == 16:
            url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
            initial_dim = 768
        elif vit_arch == "vit_base" and vit_patch_size == 8:
            url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
            initial_dim = 768
        else:
            raise ValueError(f"[DINO] Unsupported combo: arch={vit_arch}, patch={vit_patch_size}")

        vit_encoder = vits.__dict__[vit_arch](patch_size=vit_patch_size, num_classes=0)
        # TODO change if want to have last layer not unfrozen
        for p in vit_encoder.parameters():
            p.requires_grad = False
        vit_encoder.eval().cuda()  # eval mode, frozen teacher
        state_dict = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/dino/" + url
        )
        vit_encoder.load_state_dict(state_dict, strict=True)

        if enc_type_feats in ("k", "q", "v", "qkv", "mlp"):
            # Hook captures the raw qkv projection of the last attention block.
            def hook_fn_forward_qkv(module, input, output):
                hook_features["qkv"] = output

            vit_encoder._modules["blocks"][-1]._modules["attn"]._modules[
                "qkv"
            ].register_forward_hook(hook_fn_forward_qkv)

    elif vit_model in ("dinov2", "dinov2_reg"):
        # ---- with or without register tokens ----
        use_registers = (vit_model == "dinov2_reg")

        # ---- hub model name mapping (register / no-register variants) ----
        if vit_arch == "vit_small" and vit_patch_size == 14:
            dinov2_model_name = "dinov2_vits14_reg" if use_registers else "dinov2_vits14"
            initial_dim = 384
        elif vit_arch == "vit_base" and vit_patch_size == 14:
            dinov2_model_name = "dinov2_vitb14_reg" if use_registers else "dinov2_vitb14"
            initial_dim = 768
        elif vit_arch == "vit_large" and vit_patch_size == 14:
            dinov2_model_name = "dinov2_vitl14_reg" if use_registers else "dinov2_vitl14"
            initial_dim = 1024
        elif vit_arch == "vit_giant" and vit_patch_size == 14:
            dinov2_model_name = "dinov2_vitg14_reg" if use_registers else "dinov2_vitg14"
            initial_dim = 1536
        else:
            raise ValueError(f"[DINOv2] Unsupported combo: arch={vit_arch}, patch={vit_patch_size}. DINOv2通常是14x14。")

        hook_features["num_register_tokens"] = 4 if use_registers else 0

        # Loading priority: DINOV2_LOCAL_DIR env var > hard-coded cluster
        # checkout > online torch.hub.  The original code unconditionally
        # overwrote the env var with the cluster path, making it dead config.
        # A local checkout is preferred because upstream dinov2 can break
        # Python 3.9 compatibility (see InternRobotics/PPI issue #4).
        dinov2_local_dir = os.environ.get("DINOV2_LOCAL_DIR", "") or '/space/liangc/code/ovss/dinov2'
        dinov2_ckpt_path = os.environ.get("DINOV2_CKPT_PATH", "")  # optional local .pth that overrides hub weights

        if dinov2_local_dir and os.path.isdir(dinov2_local_dir):
            # Fully offline load from a local clone (must contain hubconf.py).
            vit_encoder = torch.hub.load(dinov2_local_dir, dinov2_model_name, source='local', trust_repo=True, pretrained=True)
        else:
            # Online hub (needs GitHub access; set DINOV2_LOCAL_DIR when offline).
            vit_encoder = torch.hub.load('facebookresearch/dinov2', dinov2_model_name, trust_repo=True)

        # If a local checkpoint is given, it takes precedence over hub weights.
        if dinov2_ckpt_path and os.path.isfile(dinov2_ckpt_path):
            sd = torch.load(dinov2_ckpt_path, map_location='cpu')
            vit_encoder.load_state_dict(sd, strict=False)

        # Freeze & eval.
        for p in vit_encoder.parameters():
            p.requires_grad = False
        vit_encoder.eval().cuda()

        # ---- register the qkv hook with the same key name as for DINOv1 ----
        last_blocks = vit_encoder._modules.get("blocks", None)
        assert last_blocks is not None and len(last_blocks) > 0, "[DINOv2] 未找到 blocks"
        last_block = last_blocks[-1]

        attn = getattr(last_block, "attn", None) or getattr(last_block, "attention", None)
        assert attn is not None, "[DINOv2] 未找到 attention 模块"

        if hasattr(attn, "qkv"):
            def _hook_qkv(module, inp, out):
                # out: (B, Tokens, 3*dim)
                hook_features["qkv"] = out
            attn.qkv.register_forward_hook(_hook_qkv)

    else:
        # Previously this fell through and crashed with NameError at the return.
        raise ValueError(f"Unsupported vit_model: {vit_model!r} (expected 'dino', 'dinov2' or 'dinov2_reg')")

    return vit_encoder, initial_dim, hook_features



@MODELS.register_module()
class DinoCLIPCustom(nn.Module):
    """
    Base model for all the backbones. Implements CLIP features refinement based on DINO dense features and background
    refinement using FOUND model.

    """


    def __init__(self, clip_backbone, class_names, train_block_num:Union[int, list],  vit_arch="vit_base", vit_model_name = 'dino', vit_patch_size=16, enc_type_feats="k",
                gamma=0.2, delta=0.99, apply_found=False, train_proj=False, attn_pooling=True, sam_cfg=None):
        """
        Args:
            clip_backbone: name of a MaskCLIP config file under ``configs/`` (no extension).
            class_names: class names forwarded to the MaskCLIP builder.
            train_block_num: indices of the CLIP ViT blocks to unfreeze and train.
                NOTE(review): despite the ``Union[int, list]`` hint, the code later
                tests ``i in train_block_num``, so a plain int would raise TypeError.
            vit_arch / vit_model_name / vit_patch_size / enc_type_feats: teacher
                (DINO/DINOv2) configuration, see ``get_vit_encoder``.
            gamma: confidence threshold applied to teacher correlations.
            delta: stored but not used in this class — presumably for subclasses.
            apply_found: stored flag; not used in the visible code.
            train_proj: whether CLIP's visual ``proj`` layer is trainable.
            attn_pooling: whether inference pools features with teacher correlations.
            sam_cfg: optional dict/OmegaConf config enabling a SAM teacher.
        """
        super(DinoCLIPCustom, self).__init__()
        # Normalize an OmegaConf sam_cfg into a plain dict.
        if sam_cfg is not None and not isinstance(sam_cfg, dict):
            sam_cfg = OmegaConf.to_container(sam_cfg, resolve=True)
        self.attn_pooling = attn_pooling
        self.vit_arch = vit_arch
        self.vit_model_name = vit_model_name  # newly added: selects the teacher family (dino, dinov2, ...), not just dino
        self.enc_type_feats = enc_type_feats
        # DINO attention parameter: confidence threshold on the correlations
        self.gamma = gamma
        self.vit_patch_size = vit_patch_size
        self.apply_found = apply_found
        self.delta = delta
        self.sam_cfg = sam_cfg or {}
        self.sam_image_encoder = None
        self.sam_feat_hw = None
        self.sam_patch_size = self.sam_cfg.get('patch_size', 16)
        self.sam_image_size = self.sam_cfg.get('image_size', 1024)
        self.sam_gamma = self.sam_cfg.get('gamma', None)
        # SAM pixel statistics as non-persistent buffers, pre-divided by 255 so they
        # apply to inputs in [0, 1].
        self.register_buffer('_sam_pixel_mean', torch.tensor([123.675, 116.28, 103.53], dtype=torch.float32).view(1, 3, 1, 1) / 255.0, persistent=False)
        self.register_buffer('_sam_pixel_std', torch.tensor([58.395, 57.12, 57.375], dtype=torch.float32).view(1, 3, 1, 1) / 255.0, persistent=False)
        # Indices of the CLIP ViT blocks whose parameters will be trained.
        self.train_begin_block_num = train_block_num
        print(f'训练开始的block数：{self.train_begin_block_num}')
        # Whether CLIP's proj layer is trained.
        self.train_proj = train_proj
        print(f'是否训练clip的proj层：{self.train_proj}')

        # ==== build MaskCLIP backbone =====
        maskclip_cfg = OmegaConf.load(f"configs/{clip_backbone}.yaml")
        self.maskclip_cfg = maskclip_cfg

        # A MaskCLIP model: initialized from CLIP; a hook before the last block grabs
        # the value (v) features, which are then fed manually through ln_post -> seg_head.
        self.clip_backbone_freeze = build_model(maskclip_cfg["model"], class_names=class_names)
        # The CLIP model that will be trained (a second, independent copy).
        self.clip_backbone_train = build_model(maskclip_cfg["model"], class_names=class_names)
        # Both copies share the pretrained CLIP weights loaded by the builder.

        self.hook_handlers = []  # handlers for all registered hooks

        # The last block is not duplicated here: by the time v is obtained the network
        # has already run a full forward pass, so registering a hook is sufficient.
        # self.encoder_orin = self.visual_encoder.transformer.resblocks[-1]
        # Freeze the teacher copy.
        # self.clip_backbone_freeze.eval()
        for param in self.clip_backbone_freeze.parameters():
            param.requires_grad = False

        # Register a hook on the trainable CLIP to collect attention outputs.
        self.hook_features_trn = {}
        def hook_fn_attn(module, input, output):
            if "clip_trn_attn" not in self.hook_features_trn:
                self.hook_features_trn["clip_trn_attn"] = []  # lazily initialize the list
            self.hook_features_trn["clip_trn_attn"].append(output)  # one attn output per hooked block, in order

        self.set_trainable_blocks(self.train_begin_block_num, hook_fn_attn)

    def set_trainable_blocks(self, train_begin_block_num: list, hook_fn_attn):
        """
        Mark the listed transformer blocks as trainable and register a forward
        hook on each of their attention modules; freeze all other blocks.

        Args:
            train_begin_block_num (list): indices of blocks to train, e.g. [3, 5, 7].
            hook_fn_attn (function): forward hook to register on each trained block's attn.
        """
        # Walk every transformer block (resblock) of the trainable CLIP.
        for i, block in enumerate(self.clip_backbone_train.backbone.visual.transformer.resblocks):
            
            if i in train_begin_block_num:
                # Unfreeze and register the hook.
                for param in block.parameters():
                    param.requires_grad = True
                
                # Register the hook on attn and keep the handler for later removal.
                # handler = block.attn.register_forward_hook(hook_fn_attn)
                handler = block.attn.register_forward_hook(hook_fn_attn)
                self.hook_handlers.append(handler)
            
            else:
                # Freeze: this block does not participate in training.
                for param in block.parameters():
                    param.requires_grad = False
        if self.train_proj:
            # Leave the proj layer trainable (default requires_grad stays True).
            pass
        else:
            self.clip_backbone_train.backbone.visual.proj.requires_grad_(False)
    def remove_hooks(self):
        """Remove all registered hooks."""
        for handler in self.hook_handlers:
            handler.remove()
        self.hook_handlers.clear()

    def register_hooks(self):
        """
        Re-register the attention hooks, only on the transformer blocks listed
        in ``self.train_begin_block_num``. Any previously registered hooks are
        removed first to avoid duplicates.
        """

        # Remove previous handlers to prevent duplicate registration.
        for handler in self.hook_handlers:
            handler.remove()
        self.hook_handlers = []

        # Hook: append each hooked block's attention output in order.
        def hook_fn_attn(module, input, output):
            if "clip_trn_attn" not in self.hook_features_trn:
                self.hook_features_trn["clip_trn_attn"] = []
            self.hook_features_trn["clip_trn_attn"].append(output)

        # Walk every transformer block (resblock).
        for i, block in enumerate(self.clip_backbone_train.backbone.visual.transformer.resblocks):
            if i in self.train_begin_block_num:
                # Only the listed blocks get a hook; keep the handler.
                handler = block.attn.register_forward_hook(hook_fn_attn)
                self.hook_handlers.append(handler)

    def load_teachers(self, primary_teacher):
        """
        Load the frozen teacher used to supervise attention: a DINO/DINOv2 ViT
        when ``primary_teacher == 'dino'``, otherwise a SAM image encoder taken
        from ``self.sam_cfg``.  Must be called before ``get_dino_corrs`` /
        ``get_sam_corrs`` since it also sets ``self.dino_T``.
        """
        self.dino_T = NORMALIZE
        if primary_teacher == 'dino':
            # The teacher only provides DINO features, used to supervise the qk attention.
            # ==== build DINO backbone =====
            self.vit_encoder, self.initial_dim, self.hook_features = get_vit_encoder(
                self.vit_arch,
                self.vit_model_name, 
                self.vit_patch_size,
                self.enc_type_feats,
            )
            # DINO does not participate in training.
            self.vit_encoder.eval()
            for param in self.vit_encoder.parameters():
                param.requires_grad = False

        # Otherwise the teacher is SAM.
        else:
            if self.sam_cfg.get('checkpoint'):
                if sam_model_registry is None:
                    raise ImportError('segment_anything is required when sam_cfg is provided but could not be imported.')
                ckpt_path = os.path.expanduser(self.sam_cfg.get('checkpoint'))
                if not os.path.isfile(ckpt_path):
                    raise FileNotFoundError(f'Cannot find SAM checkpoint at {ckpt_path}')
                model_type = self.sam_cfg.get('model_type', 'vit_h')
                sam_model = sam_model_registry[model_type](checkpoint=ckpt_path)
                self.sam_image_encoder = sam_model.image_encoder
                self.sam_image_encoder.eval()
                for param in self.sam_image_encoder.parameters():
                    param.requires_grad = False
                self.sam_feat_hw = None
                print(f'Loaded SAM teacher ({model_type}) from {ckpt_path}')
            else:
                self.sam_image_encoder = None
                self.sam_feat_hw = None

        # The frozen CLIP model is already built in __init__ as clip_backbone_freeze.


    # This padding could also be done via mmseg's dict(type='Pad', size_divisor=16).
    def make_input_divisible(self, x: torch.Tensor, patch_size: int = None) -> torch.Tensor:
        """Pad some pixels to make the input size divisible by the patch size."""
        if patch_size is None:
            patch_size = self.vit_patch_size
        B, _, H_0, W_0 = x.shape
        pad_w = (patch_size - W_0 % patch_size) % patch_size
        pad_h = (patch_size - H_0 % patch_size) % patch_size
        # Pad with pure black (zeros), bottom/right only.
        x = nn.functional.pad(x, (0, pad_w, 0, pad_h), value=0)
        return x

    def has_sam_teacher(self) -> bool:
        # True once load_teachers() has attached a SAM image encoder.
        return self.sam_image_encoder is not None

    def _prepare_sam_input(self, x: torch.Tensor) -> torch.Tensor:
        """Pad, resize to the fixed SAM input size, and normalize with SAM pixel stats."""
        x = self.make_input_divisible(x, self.sam_patch_size)
        x = F.interpolate(x, size=(self.sam_image_size, self.sam_image_size), mode='bilinear', align_corners=False)
        mean = self._sam_pixel_mean.to(device=x.device, dtype=x.dtype)
        std = self._sam_pixel_std.to(device=x.device, dtype=x.dtype)
        return (x - mean) / std

    @torch.no_grad()
    def get_sam_corrs(self, x: torch.Tensor):
        """
        Compute pairwise cosine correlations between SAM image-encoder features.

        :param x: (torch.Tensor) - batch of input images, assumed B C H W in [0, 1]
            (the SAM pixel stats are pre-divided by 255) — TODO confirm range with callers.
        :return: (torch.Tensor) - correlations of shape (B, H*W, H, W); values below
            ``self.sam_gamma`` (if set) are zeroed.
        """
        if not self.has_sam_teacher():
            raise RuntimeError('SAM teacher is not initialized. Provide sam_cfg with valid checkpoint to enable it.')
        sam_input = self._prepare_sam_input(x)
        encoder = self.sam_image_encoder
        device = next(encoder.parameters()).device
        if sam_input.device != device:
            sam_input = sam_input.to(device)
        embeddings = encoder(sam_input)
        B, C, H, W = embeddings.shape
        # B C H W -> B (H*W) C, L2-normalized so the matmul gives cosine similarity.
        feats = embeddings.reshape(B, C, -1).permute(0, 2, 1)
        feats = F.normalize(feats, dim=-1)
        corrs = torch.matmul(feats, feats.transpose(1, 2))
        corrs = corrs.view(B, H, W, H * W).permute(0, 3, 1, 2)
        if self.sam_gamma is not None:
            corrs = corrs.masked_fill(corrs < self.sam_gamma, 0.0)
        self.sam_feat_hw = (H, W)  # remember the teacher feature-map size
        return corrs

    @torch.no_grad()
    def extract_feats(self, type_feats="k"):
        """
        DINO feature extractor. Attaches a hook on the last attention layer.
        Reads the qkv tensor captured by the hook registered in get_vit_encoder.
        :param type_feats: (string) - type of features from DINO ViT ("q" | "k" | "v")
        """
        nh = self.vit_encoder.blocks[-1].attn.num_heads
        nb_im, nb_tokens, C_qkv = self.hook_features["qkv"].shape

        # Split the fused qkv projection into per-head q, k, v tensors.
        qkv = (
            self.hook_features["qkv"]
                .reshape(
                nb_im, nb_tokens, 3, nh, C_qkv // nh // 3
            )  # 3 corresponding to |qkv|
                .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]
        if type_feats == "q":
            return q.transpose(1, 2).float()
        elif type_feats == "k":
            return k.transpose(1, 2).float()
        elif type_feats == "v":
            return v.transpose(1, 2).float()
        else:
            raise ValueError("Unknown features")
        
    @staticmethod
    def _resize_pos_embed(pos_embed, input_shape, pos_shape, mode):
        """
        Resize pos_embed using bicubic interpolate method.
        
        Args:
            pos_embed (torch.Tensor): Position embedding weights of shape [1, L, C].
            input_shape (tuple): Target (height, width) after patchification.
            pos_shape (tuple): Original (height, width) of the pre-trained position embedding.
            mode (str): Interpolation mode.
        
        Return:
            torch.Tensor: The resized pos_embed of shape [1, L_new, C].
        """
        assert pos_embed.ndim == 3, 'pos_embed shape must be [1, L, C]'
        
        pos_h, pos_w = pos_shape
        cls_token_weight = pos_embed[:, 0:1, :]  # (1, 1, C)
        pos_embed_weight = pos_embed[:, 1:, :]  # (1, L-1, C)
        
        # Reshape to a 2D grid so it can be interpolated.
        pos_embed_weight = pos_embed_weight.reshape(
            1, pos_h, pos_w, -1).permute(0, 3, 1, 2)  # (1, C, pos_h, pos_w)
        
        # Interpolate to the target grid.
        pos_embed_weight = torch.nn.functional.interpolate(
            pos_embed_weight, size=input_shape, mode=mode, align_corners=False
        )
        
        # Flatten back to a token sequence.
        pos_embed_weight = pos_embed_weight.permute(0, 2, 3, 1).reshape(1, -1, pos_embed.shape[2])
        
        # Re-attach the CLS token in front of the interpolated patch tokens.
        pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
        
        return pos_embed

    # Checks whether the input resolution matches CLIP's native resolution; if not,
    # the positional embedding is resized (in place on the model's weights).
    def resize_pos_embed(self, inputs, model, mode='bicubic'):
        """
        inputs: image batch
        model: usually a CLIP backbone
        mode: interpolation mode
        """
        # ``model`` is usually CLIP.
        pos_embed = model.visual.positional_embedding # (L, C)

        B, C, H, W = inputs.shape
        patch_size = self.maskclip_cfg.model.backbone.patch_size
        
        hw_shape = (H // patch_size, W // patch_size)

        x_len, pos_len = hw_shape[0]*hw_shape[1], pos_embed.shape[0]
        
        # NOTE(review): pos_len includes the CLS token, so x_len == pos_len - 1 at native
        # resolution; this condition therefore also triggers the resize at native
        # resolution — confirm whether that is intended.
        if x_len != pos_len:
        # Recover the pretrained positional grid size (square grid assumed).
            orin_pos_size = int((pos_embed.shape[0]-1) ** 0.5)
            # For higher-resolution inputs the positional embedding is interpolated;
            # NOTE(review): this mutates the model's weights in place on every call.
            model.visual.positional_embedding.data = self._resize_pos_embed(
                pos_embed[None, :, :], hw_shape,  (orin_pos_size, orin_pos_size), mode)[0]
        else:
            pass

        # self._positional_embd = nn.Parameter(self.clip_backbone_freeze.backbone.visual.positional_embedding.data.clone())
        # model.visual.positional_embedding.data = self._positional_embd


    def forward(self, x: torch.Tensor):
        """
        Training-time forward: run both the frozen and the trainable CLIP, collect
        teacher correlations (SAM if available, else DINO), and return the attention
        outputs captured from the trainable blocks.
        """

        x = self.make_input_divisible(x)
        self.resize_pos_embed(x, self.clip_backbone_freeze.backbone)
        
        output_frz = self.clip_backbone_freeze.backbone.visual(x)
        cls_token_freeze = output_frz

        self.hook_features_trn["clip_trn_attn"] = []  # clear the list before each call

        self.resize_pos_embed(x, self.clip_backbone_train.backbone)
        output_trn = self.clip_backbone_train.backbone.visual(x)
        cls_token_trn = output_trn
        # Correlations: B, hf * wf, hf, wf
        # i.e. one attention map per spatial position.
        corr_outputs = dict()
        if self.has_sam_teacher():
            corr_outputs["sam"] = self.get_sam_corrs(x)
        else:
            corr_dino = self.get_dino_corrs(x)
            corr_outputs = {"dino": corr_dino}

        # Unlike the denoiser's V-pooling, the (learnable) MLP-processed v features
        # are used directly here.
        # output = self.compute_weighted_pool(output, masks)

        # output = self.clip_backbone.decode_head.cls_seg(output)
        # Attention values collected by the hooks during the forward pass.
        # NOTE(review): this isinstance result is discarded — probably meant to be an assert.
        isinstance(self.hook_features_trn["clip_trn_attn"], list)
        clip_trn_attn_list = self.hook_features_trn["clip_trn_attn"]

 
        return cls_token_freeze, cls_token_trn, corr_outputs, clip_trn_attn_list
    

    @torch.no_grad()
    def get_dino_corrs(self, x: torch.Tensor):
        """
        Gets correlations of DINO features. Applies a threshold on the correlations with self.gamma.
        NOTE(review): the gamma thresholding is currently commented out below.

        :param x: (torch.Tensor) - batch of input images
        :return: (torch.Tensor) - feature correlations
        """
        B = x.shape[0]
        feats, (hf, wf) = self.get_dino_features(x)  # B C (h_f * w_f) normalized
        corrs = torch.matmul(feats.permute(0, 2, 1), feats).reshape(B, hf, wf, hf * wf)
        # if self.gamma is not None:
        #     corrs[corrs < self.gamma] = 0.0

        return corrs.permute(0, 3, 1, 2)  # B C h w

    def get_dino_features(self, x: torch.Tensor):
        """
        Extracts dense DINO features.
        Requires load_teachers('dino') to have been called (sets dino_T / vit_encoder).

        :param x: (torch.Tensor) - batch of input images
        :return: (torch.Tensor) - of dense DINO features, (int, int) - size of feature map
        """
        x = self.make_input_divisible(x)
        batch = self.dino_T(x)  # tensor B C H W
        h_featmap = batch.shape[-2] // self.vit_patch_size
        w_featmap = batch.shape[-1] // self.vit_patch_size

        # Forward pass
        # Encoder forward pass and get hooked intermediate values
        _ = self.vit_encoder(batch)

        # Get decoder features
        feats = self.extract_feats(type_feats=self.enc_type_feats)
        num_extra_tokens = 1
        # Account for DINOv2's extra register tokens, if any.
        # if self.hook_features['num_register_tokens']:
        #     num_extra_tokens+=self.hook_features['num_register_tokens']
        num_extra_tokens += int(self.hook_features.get('num_register_tokens', 0) or 0)
        # B nbtokens+1 nh dim -> drop CLS (and registers), flatten heads, channels first.
        feats = feats[:, num_extra_tokens:, :, :].flatten(-2, -1).permute(0, 2, 1)  # B C nbtokens
        # B, C, nbtokens
        feats = feats / feats.norm(dim=1, keepdim=True)  # normalize features

        return feats, (h_featmap, w_featmap)

    @torch.no_grad()
    def get_clip_features(self, x: torch.Tensor):
        """
        Extracts MaskCLIP features
        :param x: (torch.Tensor) - batch of input images
        :return: (torch.Tensor) - clip dense features, (torch.Tensor) - output probabilities
        """
        x = self.make_input_divisible(x) # callers already pad; this second call is redundant but harmless
        # maskclip_map: the segmentation map produced directly by MaskCLIP
        maskclip_map, feat = self.clip_backbone_freeze(x, return_feat=True)

        return feat, maskclip_map

    @torch.no_grad()
    def get_found_preds(self, x: torch.Tensor, resize=None):
        """
        Gets FOUND predictions.
        NOTE(review): ``self.found_model`` is never assigned in the visible code —
        it must be attached externally before calling this method.
        :param x: (torch.Tensor) - batch of input images
        :param resize: optional (tuple(int)) - size to resize the output prediction map to
        :return: (torch.Tensor) - saliency predictions
        """
        x = self.make_input_divisible(x)
        x = self.dino_T(x)
        found_preds, _, shape_f, att = self.found_model.forward_step(x, for_eval=True)
        preds = torch.sigmoid(found_preds.detach()).float()
        if resize is not None:
            preds = T.functional.resize(preds, resize)
        return preds

    @staticmethod
    def compute_weighted_pool(maskclip_feats: torch.Tensor, corrs: torch.Tensor):
        """
        Weighted pooling method.
        :param maskclip_feats: torch.tensor - raw clip features
        :param corrs: torch.tensor - correlations as weights for pooling mechanism
        :return: torch.tensor - refined clip features
        """
        # maskclip_feats: B C H W
        # corrs: B H W HW
        B = maskclip_feats.shape[0]
        h_m, w_m = maskclip_feats.shape[-2:]
        h_w, w_w = corrs.shape[-2:]

        if (h_m != h_w) or (w_m != w_w):
            # The DINO feature map usually has higher resolution than CLIP's,
            # so resize the CLIP features to the correlation map's resolution.
            maskclip_feats = resize(
                input=maskclip_feats,
                size=(h_w, w_w),
                mode='bilinear',
                align_corners=False)
            h_m, w_m = h_w, w_w # resolutions now match

        # Weighted sum of CLIP features using the correlation maps as weights.
        maskclip_feats_ref = torch.einsum("bnij, bcij -> bcn", corrs, maskclip_feats)  # B C HW
        norm_factor = corrs.flatten(-2, -1).sum(dim=-1)[:, None]  # B 1 HW
        maskclip_feats_ref = maskclip_feats_ref / (norm_factor + 1e-6)

        # RESHAPE back to 2d
        maskclip_feats_ref = maskclip_feats_ref.reshape(B, -1, h_m, w_m)
        return maskclip_feats_ref


@MODELS.register_module()
class CLIP_DINOiser_custom(DinoCLIPCustom):
    """
    CLIP-DINOiser - torch.nn.Module with two single conv layers for object correlations (obj_proj) and background
    filtering (bkg_decoder).
    NOTE(review): both conv layers are currently commented out in __init__.
    """

    def __init__(self, clip_backbone, class_names, train_block_num:Union[int, list], vit_arch="vit_base", vit_model_name = 'dino', vit_patch_size=16, enc_type_feats="v",
                 feats_idx=-3, gamma=0.2, delta=0.99, in_dim=256, conv_kernel=3, train_proj=False, attn_pooling=True, attn_type = 'maskclip', patch_grid = 14, sam_cfg=None):
        # NOTE(review): ``delta`` is not forwarded to super().__init__ (the parent
        # default applies) but is overwritten on self below, so the net effect is the same.
        super(CLIP_DINOiser_custom, self).__init__(clip_backbone, class_names, train_block_num, vit_arch, vit_model_name, vit_patch_size, enc_type_feats, gamma, train_proj=train_proj, attn_pooling=attn_pooling, sam_cfg=sam_cfg)

        # Input width for the (currently commented-out) bkg_decoder / obj_proj heads.
        in_size = 768 if feats_idx != 'final' else 512
        self.gamma = gamma
        self.feats_idx = feats_idx
        self.delta = delta
        self.in_dim = in_dim
        self.attn_type = attn_type
        self.patch_grid = patch_grid
        # self.bkg_decoder = nn.Conv2d(in_size, 1, (1, 1))
        # self.obj_proj = nn.Conv2d(in_size, in_dim, (conv_kernel, conv_kernel), padding=conv_kernel // 2,
        #                           padding_mode='replicate')

        # setup clip features for training: capture an intermediate ln_2 output of
        # the frozen CLIP under the key 'clip_inter'.
        train_feats = {}
        if feats_idx != 'final':

            def get_activation(name):
                def hook(model, input, output):
                    train_feats[name] = output.detach().permute(1, 0, 2)  # change to batch first

                return hook

            self.clip_backbone_freeze.backbone.visual.transformer.resblocks[feats_idx].ln_2.register_forward_hook(
                get_activation('clip_inter'))
            self.train_feats = train_feats
        
        # Configure MaskCLIP's attention type used at inference time.
        self.set_attn_type(self.attn_type, self.patch_grid)

    def set_attn_type(self, attn_type, patch_grid):
        """Propagate the inference attn_type and native patch grid to both CLIP copies."""
        self.attn_type = attn_type
        self.clip_backbone_train.attn_type = attn_type
        self.clip_backbone_freeze.attn_type = attn_type
        self.clip_backbone_train.orin_pos_size = patch_grid
        self.clip_backbone_freeze.orin_pos_size = patch_grid
        print(f'Set attn_type to {attn_type}, patch_grid to {patch_grid}')


    def forward_pass(self, x: torch.Tensor):
        # NOTE(review): this method appears stale/broken — it reads
        # self.train_feats['v'] and ['clip_final'] (only 'clip_inter' is ever written),
        # and calls self.get_new_cls_token / self.get_clip_coors / self.encoder_upper,
        # none of which are defined in the visible code.
        # x = self.make_input_divisible(x)
        # 1. Get intermediate outputs; only the forward propagation matters here.
        _ = self.get_clip_features(x)
        v = self.train_feats['v']
        cls_token = self.train_feats['clip_final'][:, 0]
        
        # 2. Feed into the upper encoder and proj to get the final cls_token.
        new_cls_token = self.get_new_cls_token(x)

        # 3. Get DINO outputs and compute correlations.
        masks = self.get_dino_corrs(x)

        # 4. Feed the intermediate output into the last block; compute qk correlations.
        coor_clip = self.get_clip_coors(v, self.encoder_upper)
        

        return cls_token, new_cls_token, masks, coor_clip
    
    def forward_custom(self, x: torch.Tensor):
        # x = self.make_input_divisible(x)
        # 1. Get both CLIP cls tokens, the teacher correlations, and the per-block
        #    attention outputs of the trainable (unfrozen) CLIP layers.
        cls_token_freeze, cls_token_trn, teacher_corrs, clip_trn_attn_list = super().forward(x)
        return cls_token_freeze, cls_token_trn, teacher_corrs, clip_trn_attn_list

    def forward_train(self, x: torch.Tensor):
        """Training entry point: thin wrapper around forward_custom."""
        output = self.forward_custom(x)
        return output

    # To fit mmdet's test pipeline, forward defaults to inference.
    def forward(self, x: torch.Tensor):
        """
        Inference: get v features from the trainable CLIP, optionally pool them with
        DINO correlations (thresholded by self.gamma), then decode to predictions.
        """

        # output = self.clip_backbone_train.get_v(x)
        output = self.clip_backbone_train.get_v(x,attn_type = self.attn_type)
        if self.attn_pooling:
            # Correlations: B, position, attention map.
            corrs = self.get_dino_corrs(x)
            # Compute weighted pooling --------------------------------------------------
            if self.gamma:
                corrs[corrs < self.gamma] = 0.0
            out_feats = self.compute_weighted_pool(output, corrs)
        else:
            out_feats = output
        # Get the predictions --------------------------------------------------
        output = self.clip_backbone_train.decode_head.forward(out_feats)

        return output

    # # To fit mmdet's test pipeline, forward defaults to inference.
    # def forward(self, img, img_metas, return_loss=True, **kwargs):
    #     H,W = img[0].shape[-2:]
    #     H = H // self.clip_backbone_train.patch_size
    #     W = W // self.clip_backbone_train.patch_size
    #     output = self.clip_backbone_train.get_v_test(img, self.attn_type)
    #     if self.attn_pooling:
    #         # Correlations: B, position, attention map.
    #         corrs = self.get_dino_corrs(img)
    #         # Compute weighted pooling --------------------------------------------------
    #         if self.gamma:
    #             corrs[corrs < self.gamma] = 0.0
    #         out_feats = self.compute_weighted_pool(output, corrs)
    #     else:
    #         out_feats = output
    #     # Get the predictions --------------------------------------------------
    #     seg_logit = self.clip_backbone_train.decode_head.forward(out_feats)
    #     # seg_logit = seg_logit.reshape(1, H, W,-1)
    #     # seg_logit = seg_logit.permute(0, 3, 1, 2).contiguous() 
    #     size = img_metas[0][0]['ori_shape'][:2]
    #     output = resize(
    #             seg_logit.cpu(),
    #             size=size,
    #             mode='bilinear',
    #             align_corners=False,
    #             warning=False)
        
    #     # Post-processing.
    #     query_idx = self.clip_backbone_train.decode_head.query_idx
    #     # Map prompt-level queries back to logical classes.
    #     num_cls, num_queries = max(query_idx) + 1, len(query_idx)
    #     if num_cls != num_queries:
    #         cls_index = nn.functional.one_hot(query_idx) # (num prompted classes) x (num logical classes) mapping
    #         cls_index = cls_index.T.view(num_cls, num_queries, 1, 1)
    #         output = output.to('cuda')
    #         seg_logits = (output * cls_index).max(1)[0]
    #         output = seg_logits
    #         output = output.cpu()
    #         # output = seg_logits.argmax(0, keepdim=True)

    #     return output
    
    # Used to render inference results for visualization.
    def show_result(self,
                    img,
                    result,
                    palette=None,
                    classes=None,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None,
                    opacity=0.5,
                    gt=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor): The semantic segmentation results to draw over
                `img`.
            palette (list[list[int]]] | np.ndarray | None): The palette of
                segmentation map. If None is given, random palette will be
                generated. Default: None
            win_name (str): The window name.
            wait_time (int): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.
            opacity(float): Opacity of painted segmentation map.
                Default 0.5.
                Must be in (0, 1] range.
        Returns:
            img (Tensor): Only if not `show` or `out_file`

        NOTE(review): relies on self.CLASSES / self.PALETTE which are not set in
        this class — they follow the mmseg convention of being attached externally
        (or via the `classes` argument); confirm before calling.
        """
        img = mmcv.imread(img)
        img = img.copy()
        seg = result[0]

        seg = seg.argmax(dim=0)

        seg = seg.cpu().numpy() if seg.is_cuda else seg.numpy()
        if classes is not None:
            self.CLASSES = classes
        if palette is None:
            if self.PALETTE is None:
                # Get random state before set seed,
                # and restore random state later.
                # It will prevent loss of randomness, as the palette
                # may be different in each iteration if not specified.
                # See: https://github.com/open-mmlab/mmdetection/issues/5844
                state = np.random.get_state()
                np.random.seed(42)
                # random palette
                palette = np.random.randint(
                    0, 255, size=(len(self.CLASSES), 3))
                np.random.set_state(state)
            else:
                palette = self.PALETTE
        palette = np.array(palette)
        assert palette.shape[0] == len(self.CLASSES), '({}) vs. ({})'.format(palette.shape[0], len(self.CLASSES))
        assert palette.shape[1] == 3
        assert len(palette.shape) == 2
        assert 0 < opacity <= 1.0
        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
        for label, color in enumerate(palette):
            color_seg[seg == label, :] = color
        # convert to BGR
        color_seg = color_seg[..., ::-1]

        img = img * (1 - opacity) + color_seg * opacity
        if gt is not None:
            # set the ignored area to black
            img[gt == 255, :] = np.array([0, 0, 0])
        img = img.astype(np.uint8)
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False

        if show:
            mmcv.imshow(img, win_name, wait_time)
        if out_file is not None:
            mmcv.imwrite(img, out_file)

        if not (show or out_file):
            warnings.warn('show==False and out_file is not specified, only '
                          'result image will be returned')
            return img