# ---------------------------------------------------------------------------------------------------
# CLIP-DINOiser
# authors: Monika Wysoczanska, Warsaw University of Technology

# Copyright (c) OpenMMLab. All rights reserved.
# Modified version of the original MaskCLIP code: https://github.com/chongzhou96/MaskCLIP/tree/master
# ---------------------------------------------------------------------------------------------------

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.ops import resize
from typing import List, Tuple
from torch import Tensor
from open_clip import get_tokenizer,  create_model_from_pretrained, create_model_and_transforms
from models.builder import MODELS
import torchvision.transforms as T
from .utils.prompt_templates import imagenet_templates
import math

OPENAI_NORMALIZE = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))


@MODELS.register_module()
class MaskClip(nn.Module):
    """MaskCLIP-style dense feature extractor on top of an open_clip ViT.

    A forward hook on the penultimate transformer block captures the input
    of the last block; ``extract_v`` then re-runs that last block manually
    with one of several modified attention schemes (MaskCLIP, NACLip, CSA,
    ClearCLIP, vanilla CLIP, ...). The resulting patch tokens are projected
    with CLIP's visual projection and decoded by ``decode_head`` into
    per-class logits at patch resolution.
    """

    def __init__(
            self,
            backbone,
            decode_head,
            clip_model,
            class_names
        ):
        """
        Args:
            backbone (dict): Backbone config; reads 'patch_size' and
                'img_size' (default 224).
            decode_head (dict): Head config; 'type' names the head class,
                'pretrained' names the CLIP checkpoint. The whole dict is
                forwarded to the head constructor as keyword arguments.
            clip_model (str): open_clip model name.
            class_names (list[str]): Class vocabulary passed to the head.
        """
        super(MaskClip, self).__init__()
        # NOTE(review): eval() on a config-supplied string executes arbitrary
        # code; acceptable only with trusted config files. It resolves the
        # head class name (e.g. "MaskClipHead") to the class object.
        self.decode_head = eval(decode_head.get('type'))(clip_model, class_names, **decode_head)
        self.patch_size = backbone.get('patch_size')
        self.img_size = tuple([backbone.get('img_size', 224)]*2)
        pretrained = decode_head.get("pretrained")
        # Unlike the original MaskCLIP, the backbone is loaded directly via
        # open_clip instead of mmseg's ViT wrapper, so no checkpoint-name
        # conversion step is needed. The returned preprocessing transform
        # is intentionally unused here.
        model, _transform = create_model_from_pretrained(clip_model, pretrained=pretrained)
        model.eval()
        # Input normalisation is expected to happen upstream; set this to
        # OPENAI_NORMALIZE to normalise inside this module instead.
        self.clip_Transform = None
        self.hook_features = dict()
        self.backbone = model

        def hook_fn_forward(module, input, output):
            # Cache the output of the penultimate resblock, i.e. the input
            # of the last resblock (shape: tokens x batch x dim).
            self.hook_features["v"] = output.clone().detach()

        # Hooking the second-to-last resblock yields the input of the last
        # block, which extract_v() re-processes manually.
        self.backbone.visual.transformer.resblocks[-2].register_forward_hook(hook_fn_forward)
        self._positional_embd = nn.Parameter(self.backbone.visual.positional_embedding.data.clone())
        # Side length (in patches) of the pre-trained positional-embedding
        # grid, used when interpolating it for other input resolutions.
        # Bug fix: this attribute was read in extract_feat() but never set,
        # raising AttributeError for any non-default input resolution.
        self.orin_pos_size = int((self._positional_embd.shape[0] - 1) ** 0.5)

        text_channels = 512
        in_channels = 3
        # 1x1 conv that carries CLIP's visual projection; its weight is
        # (re)assigned from the backbone inside extract_feat().
        self.proj = nn.Conv2d(in_channels, text_channels, 1, bias=False)

    @torch.no_grad()
    def extract_feat(self, inputs: Tensor, attn_type) -> Tensor:
        """Extract dense CLIP features from images.

        Args:
            inputs: Image batch of shape (B, C, H, W).
            attn_type: Attention variant forwarded to ``extract_v``.

        Returns:
            Tensor of shape (B, text_channels, H // patch, W // patch).
        """
        pos_embed = self.backbone.visual.positional_embedding

        B, C, H, W = inputs.shape
        hw_shape = (H // self.patch_size, W // self.patch_size)
        # Number of patch tokens vs. number of positional embeddings
        # (minus one for the cls token).
        x_len, pos_len = hw_shape[0] * hw_shape[1], pos_embed.shape[0] - 1

        if x_len != pos_len:
            # Input resolution differs from the pre-training resolution:
            # bicubically interpolate the positional-embedding grid.
            orin_pos_size = self.orin_pos_size
            self.backbone.visual.positional_embedding.data = self.resize_pos_embed(
                self._positional_embd[None], hw_shape,  (orin_pos_size, orin_pos_size), 'bicubic')[0]

        # Full forward pass, run only to trigger the hook that captures the
        # input of the last resblock.
        _ = self.backbone(inputs)
        v = self.hook_features["v"]
        # Manually run the last resblock with the requested attention.
        v = self.extract_v(v, self.backbone.visual.transformer.resblocks[-1], attn_type).permute(1, 0, 2)
        v = self.backbone.visual.ln_post(v)
        v = v[:, 1:]  # drop the cls token
        v = v.reshape(B, hw_shape[0], hw_shape[1], -1).permute(0, 3, 1, 2).contiguous()

        # Apply CLIP's visual projection as a 1x1 convolution.
        self.proj.weight = nn.Parameter(self.backbone.visual.proj.t()[:, :, None, None])
        v = self.proj(v)

        # Restore the original positional embedding for the next call.
        self.backbone.visual.positional_embedding.data = self._positional_embd
        return v

    def extract_v(self, x, block, attn_type):
        """Re-run the last transformer block with a modified attention.

        Args:
            x: Tokens of shape (num_tokens, batch, embed_dim) — the input
                of the last resblock captured by the forward hook.
            block: The last ResidualAttentionBlock of the visual tower.
            attn_type: One of 'naclip', 'nonly', 'kk', 'csa', 'maskclip',
                'clearclip', 'clip'.

        Returns:
            Tokens of shape (num_tokens, batch, embed_dim).
        """
        attn_layer = block.attn
        num_heads = attn_layer.num_heads
        num_tokens, bsz, embed_dim = x.size()
        head_dim = embed_dim // num_heads
        scale = head_dim ** -0.5

        # Pre-norm, then the fused QKV projection of nn.MultiheadAttention.
        y = block.ln_1(x)
        q, k, v = F.linear(y, attn_layer.in_proj_weight, attn_layer.in_proj_bias).chunk(3, dim=-1)

        # Reshape to (bsz * num_heads, num_tokens, head_dim) for bmm.
        q = q.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

        # Infer the patch grid from the token count (assumes a square
        # image; one cls token precedes the patch tokens).
        num_patch_tokens = num_tokens - 1
        side_len = int(num_patch_tokens ** 0.5)
        n_patches = (side_len, side_len)

        if attn_type in ['naclip', 'nonly', 'kk']:
            if not hasattr(self, 'attn_cache'):
                self.attn_cache = {}

            if attn_type in ['naclip', 'nonly']:
                # Cache the Gaussian neighbourhood bias per grid size.
                if n_patches not in self.attn_cache:
                    window_size = [s * 2 - 1 for s in n_patches]
                    window = self.gaussian_window(*window_size, std=1.0)
                    addition = self.get_attention_addition(*n_patches, window).unsqueeze(0).to(x.dtype).to(x.device)
                    self.attn_cache[n_patches] = addition
                else:
                    addition = self.attn_cache[n_patches]

                if attn_type == 'naclip':
                    # NACLip: k-k similarity plus the neighbourhood bias.
                    attn_weights = torch.bmm(k, k.transpose(1, 2)) * scale
                    omega = addition
                elif attn_type == 'nonly':
                    # Neighbourhood bias alone, rescaled to the magnitude
                    # of the q/k logits.
                    # NOTE(review): the zeros are allocated per head, not
                    # per (batch * head); this only matches the bmm shapes
                    # for batch size 1 — confirm before batching.
                    attn_weights = torch.zeros((num_heads, q.size(1), k.size(1)), dtype=x.dtype, device=x.device)
                    omega = addition * (scale * torch.einsum('hop,hPO->hpP', q.norm(dim=2).unsqueeze(1), k.norm(dim=2).unsqueeze(2)).mean().item())

                # Out-of-place add so the cached bias is never mutated.
                attn_weights = attn_weights + omega
                attn_weights = F.softmax(attn_weights, dim=-1)

            elif attn_type == 'kk':
                attn_weights = torch.bmm(k * scale, k.transpose(1, 2))
                attn_weights = F.softmax(attn_weights, dim=-1)

            v = torch.bmm(attn_weights, v)
            v = v.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
            v = block.attn.out_proj(v)
            x = x + v
            x = x + block.mlp(block.ln_2(x))
            return x
        elif attn_type == 'csa':
            # CSA: sum of the two self-similarity attentions (q-q and k-k).
            q_attn = torch.bmm(q, q.transpose(1, 2)) * scale
            k_attn = torch.bmm(k, k.transpose(1, 2)) * scale
            attn_weights = F.softmax(q_attn, dim=-1) + F.softmax(k_attn, dim=-1)
            v = torch.bmm(attn_weights, v)
            v = v.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
            v = block.attn.out_proj(v)  # W_o projection
            x = x + block.ls_1(v)  # residual
            x = x + block.ls_2(block.mlp(block.ln_2(x)))
        elif attn_type == 'maskclip':
            # MaskCLIP: skip attention entirely, project the values only.
            v = v.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
            v = block.attn.out_proj(v)  # W_o projection
            # Out-of-place residual: the original `x += v` mutated the
            # hooked tensor in place.
            x = x + v
            x = x + block.mlp(block.ln_2(x))
        elif attn_type == 'clearclip':
            # ClearCLIP: q-q attention, no residual and no MLP.
            q_attn = torch.bmm(q, q.transpose(1, 2)) * scale
            attn_weights = F.softmax(q_attn, dim=-1)
            v = torch.bmm(attn_weights, v)
            v = v.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
            v = block.attn.out_proj(v)  # W_o projection
            x = v
        elif attn_type == 'clip':
            # Vanilla CLIP attention (q-k softmax).
            attn_weights = F.softmax(torch.bmm(q, k.transpose(1, 2)) * scale, dim=-1)
            v = torch.bmm(attn_weights, v)
            v = v.transpose(0, 1).contiguous().view(-1, bsz, embed_dim)
            v = block.attn.out_proj(v)  # W_o projection
            x = x + v  # out-of-place residual (see 'maskclip' branch)
            x = x + block.mlp(block.ln_2(x))
        return x

    @staticmethod
    def gaussian_window(dim1, dim2, std=1.):
        """Return a (dim1, dim2) 2-D Gaussian window centred at the middle.

        The peak value is 1 at the centre and decays with squared distance
        scaled by ``std``.
        """
        constant = 1 / (std * math.sqrt(2))
        ks = [torch.linspace(-(dim - 1) / 2.0 * constant,
                            (dim - 1) / 2.0 * constant,
                            dim) for dim in [dim1, dim2]]
        dist_square_to_mu = (torch.stack(torch.meshgrid(*ks, indexing='ij')) ** 2).sum(0)
        return torch.exp(-dist_square_to_mu)

    @staticmethod
    def get_attention_addition(dim1, dim2, window, adjust_for_cls=True):
        """Build the (L, L) additive attention bias from a spatial window.

        Each token attends to its spatial neighbours with weights given by
        ``window``. With ``adjust_for_cls`` a zero row/column is prepended
        for the cls token, giving an (L+1, L+1) matrix.
        """
        m = torch.einsum('ij,kl->ijkl', torch.eye(dim1), torch.eye(dim2)).permute(0, 3, 1, 2).contiguous()
        out = F.conv2d(m.view(-1, dim1, dim2).unsqueeze(1), window.unsqueeze(0).unsqueeze(1), padding='same').squeeze(1)
        out = out.view(dim1 * dim2, dim1 * dim2)
        if adjust_for_cls:
            v_adjusted = torch.vstack([torch.zeros((1, dim1 * dim2)), out])
            out = torch.hstack([torch.zeros((dim1 * dim2 + 1, 1)), v_adjusted])
        return out

    @staticmethod
    def resize_pos_embed_xxx(pos_embed, input_shpae, pos_shape, mode):
        """Resize pos_embed weights (legacy mmseg variant, unused).

        NOTE(review): kept for reference only; depends on mmseg's
        ``resize``. Prefer ``resize_pos_embed`` below.

        Args:
            pos_embed (torch.Tensor): Position embedding weights.
            input_shpae (tuple): Tuple for (downsampled input image height,
                downsampled input image width).
            pos_shape (tuple): The resolution of downsampled origin training
                image.
            mode (str): Algorithm used for upsampling:
                ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
                ``'trilinear'``. Default: ``'nearest'``
        Return:
            torch.Tensor: The resized pos_embed of shape [B, L_new, C]
        """
        assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
        pos_h, pos_w = pos_shape
        cls_token_weight = pos_embed[:, 0]
        pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
        pos_embed_weight = pos_embed_weight.reshape(
            1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
        pos_embed_weight = resize(
            pos_embed_weight, size=input_shpae, align_corners=False, mode=mode)
        cls_token_weight = cls_token_weight.unsqueeze(1)
        pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
        pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
        return pos_embed

    @staticmethod
    def resize_pos_embed(pos_embed, input_shape, pos_shape, mode):
        """
        Resize pos_embed using bicubic interpolate method.

        Args:
            pos_embed (torch.Tensor): Position embedding weights of shape [1, L, C].
            input_shape (tuple): Target (height, width) after patchification.
            pos_shape (tuple): Original (height, width) of the pre-trained position embedding.
            mode (str): Interpolation mode.

        Return:
            torch.Tensor: The resized pos_embed of shape [1, L_new, C].
        """
        assert pos_embed.ndim == 3, 'pos_embed shape must be [1, L, C]'

        pos_h, pos_w = pos_shape
        cls_token_weight = pos_embed[:, 0:1, :]  # (1, 1, C), kept untouched
        pos_embed_weight = pos_embed[:, 1:, :]  # (1, L-1, C)

        # Reshape to an image-like grid for 2-D interpolation.
        pos_embed_weight = pos_embed_weight.reshape(
            1, pos_h, pos_w, -1).permute(0, 3, 1, 2)  # (1, C, pos_h, pos_w)

        pos_embed_weight = torch.nn.functional.interpolate(
            pos_embed_weight, size=input_shape, mode=mode, align_corners=False
        )

        # Flatten back to token form.
        pos_embed_weight = pos_embed_weight.permute(0, 2, 3, 1).reshape(1, -1, pos_embed.shape[2])

        # Re-attach the cls token in front of the interpolated patch tokens.
        pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)

        return pos_embed

    def get_v(self, inputs: Tensor, attn_type='maskclip') -> Tensor:
        """Return dense "v" features at test time (for attention pooling)."""
        if self.clip_Transform is not None:
            inputs = self.clip_Transform(inputs)
        x = self.extract_feat(inputs, attn_type=attn_type)
        return x

    def get_v_test(self, inputs, attn_type='maskclip') -> Tensor:
        """Return dense "v" features for the gpu_test visualisation path."""
        if isinstance(inputs, list):
            # Already normalised upstream.
            pass
        elif self.clip_Transform is not None:
            # Guard added: clip_Transform may be None (it is by default).
            inputs = self.clip_Transform(inputs)
        # NOTE(review): inputs[0] assumes the list layout produced by the
        # gpu_test pipeline; for a plain tensor this drops the batch dim.
        x = self.extract_feat(inputs[0], attn_type=attn_type)
        return x

    def forward(self, inputs: Tensor, return_feat=False, attn_type='maskclip') -> Tensor:
        """Encode images with backbone and decode into a semantic segmentation
        map of the same size as input.

        Args:
            inputs: Image batch (B, C, H, W).
            return_feat: If True, also return the dense features.
            attn_type: Attention variant for the last transformer block.
        """
        # Bug fix: the original called self.clip_Transform unconditionally,
        # but it is None by default, which raised TypeError.
        if self.clip_Transform is not None:
            inputs = self.clip_Transform(inputs)
        x = self.extract_feat(inputs, attn_type=attn_type)
        if return_feat:
            seg_logits, feats = self.decode_head(x, return_feat)
            return seg_logits, feats
        else:
            seg_logits = self.decode_head(x)
        return seg_logits

class MaskClipHead(nn.Module):
    """Decode head that classifies dense CLIP features against a set of
    L2-normalised class text embeddings (cosine similarity + softmax)."""

    def __init__(self, clip_model, class_names, in_channels=3, text_channels=512, use_templates=False, pretrained=None,
                 **kwargs):
        """
        Args:
            clip_model (str): open_clip model name (used for the tokenizer
                and the text tower).
            class_names (list[str]): Initial class vocabulary.
            in_channels (int): Channels of the (unused) projection conv input.
            text_channels (int): Dimension of the text embedding space.
            use_templates (bool): If True, average over imagenet_templates.
            pretrained (str | None): open_clip checkpoint tag.
        """
        super(MaskClipHead, self).__init__()

        self.text_channels = text_channels
        self.clip_model = clip_model
        self.pretrained = pretrained
        self.class_names = class_names
        self.in_channels = in_channels
        self.use_templates = use_templates
        self.tokenizer = get_tokenizer(clip_model)

        self.model, _ = create_model_from_pretrained(clip_model, pretrained=pretrained)
        self.model = self.model.to(device='cuda')
        self.model.eval()

        # 1x1 conv carrying CLIP's visual projection (e.g. 768 -> 512).
        self.proj = nn.Conv2d(self.in_channels, text_channels, 1, bias=False)
        self.proj.weight = nn.Parameter(self.model.visual.proj.t()[:, :, None, None])

        # Classifier weights: one normalised text embedding per class.
        # A buffer is saved/moved with the module but never optimised.
        self.register_buffer("text_embeddings", self._get_class_embeddings(self.model, self.class_names))

    def update_text_embeddings(self, text_embeddings_path):
        """Replace the classifier buffer with embeddings loaded from disk.

        The loaded embeddings are re-normalised row-wise before use.
        """
        if 'text_embeddings' in self._buffers:
            del self._buffers['text_embeddings']
        loaded = torch.load(text_embeddings_path, map_location='cuda')
        loaded = loaded / loaded.norm(dim=1, keepdim=True)
        # Register a fresh float32 buffer, then copy, so the buffer dtype
        # does not depend on the checkpoint's dtype.
        self.register_buffer('text_embeddings', torch.randn(loaded.shape[0], self.text_channels))
        self.text_embeddings[:, :] = loaded[:, :]
        print(f'Loaded text embeddings from {text_embeddings_path}')

    def load_text_embeddings(self):
        """Legacy loader for pre-computed text embeddings.

        NOTE(review): relies on ``self.text_embeddings_path``, which is no
        longer set in ``__init__`` — confirm before calling.
        """
        if 'text_embeddings' in self._buffers:
            del self._buffers['text_embeddings']
        loaded = torch.load(self.text_embeddings_path, map_location='cuda')
        loaded = loaded / loaded.norm(dim=1, keepdim=True)
        # Bug fix: re-register the buffer before writing into it; after the
        # delete above the attribute no longer existed, so the original
        # in-place assignment raised AttributeError.
        self.register_buffer('text_embeddings', torch.randn(loaded.shape[0], self.text_channels))
        self.text_embeddings[:, :] = loaded[:, :]
        print(f'Loaded text embeddings from {self.text_embeddings_path}')

    @torch.no_grad()
    def update_vocab(self, class_names):
        """Re-embed a new vocabulary and swap it into the classifier."""
        self.text_embeddings = self._get_class_embeddings(self.model, class_names)

    @torch.no_grad()
    def update_vocab_from_file(self, class_names_path, area_thd, prob_thd = 0.0):
        """Refresh the classifier from a class-name file before inference.

        The file maps real (synonym) names to logical classes; see
        ``get_cls_idx``. Also stores the background-area and probability
        thresholds used downstream.
        """
        query_words, self.query_idx = get_cls_idx(class_names_path)
        self.num_queries = len(query_words)
        # Logical class count = highest logical index + 1.
        self.num_classes = max(self.query_idx) + 1
        self.query_idx = torch.Tensor(self.query_idx).to(torch.int64).to('cuda')

        self.text_embeddings = self._get_class_embeddings(self.model, query_words)
        self.area_thd = area_thd
        self.prob_thd = prob_thd

    @torch.no_grad()
    def _embed_label(self, text_model: torch.nn.Module, label: str) -> torch.Tensor:
        """
        Encode label name into a single vector: per-template embeddings are
        normalised, averaged, and the mean is normalised again.
        """
        if self.use_templates:
            templates = imagenet_templates
        elif self.pretrained and "laion" in self.pretrained:
            # Guard added: pretrained may be None (the original raised
            # TypeError on the `in` test).
            templates = ['a photo of a {}', 'a photo of an {}']
        else:
            templates = ['a {}']
        all_prompts = [self.tokenizer(template.format(label)).to(dtype=torch.long, device='cuda') for template in templates]
        out = text_model.encode_text(torch.cat(all_prompts))  # text tower only
        out = out / out.norm(dim=-1, keepdim=True)
        out = out.mean(dim=0)
        out = out / out.norm()
        return out

    def _get_class_embeddings(self, text_model: torch.nn.Module, class_names: List[str]):
        """Stack one normalised embedding per class; returns shape (N, C)."""
        aug_embeddings = torch.stack([self._embed_label(text_model, label) for label in class_names])
        aug_embeddings = aug_embeddings / aug_embeddings.norm(dim=-1, keepdim=True)
        # squeeze(1) is a no-op for (N, C); kept for legacy shape safety.
        return aug_embeddings.squeeze(1)

    def forward(self, inputs, return_feat=False):
        """Classify dense features; optionally also return the features.

        ``inputs`` is expected to already be projected into the text space
        by the backbone, so no extra projection is applied here.
        """
        feat = inputs
        output = self.cls_seg(feat)
        if return_feat:
            return output, feat
        return output

    def cls_seg(self, feat):
        """Cosine-similarity classification of (B, C, H, W) features.

        Returns per-class softmax probabilities of shape (B, N, H, W),
        using a fixed logit scale of 100.
        """
        feat = feat / feat.norm(dim=1, keepdim=True)
        output = F.conv2d(feat, self.text_embeddings[:, :, None, None])
        output = F.softmax(output * 100, dim=1)
        return output


# Parse a txt file that holds both real (synonym) and logical class names;
# returns, for each real class name, the index of its logical class.
def get_cls_idx(path):
    """Parse a class-name file mapping real names to logical classes.

    Each line holds one logical class as comma-separated synonyms
    ("person, human, people"). Returns a flat list of all real names and,
    aligned with it, the logical (line) index each name belongs to.
    """
    with open(path, 'r') as f:
        lines = f.readlines()

    class_names, class_indices = [], []
    for logical_idx, line in enumerate(lines):
        synonyms = line.split(', ')
        class_names.extend(synonyms)
        class_indices.extend(logical_idx for _ in synonyms)

    # Drop newline characters left over from readlines().
    class_names = [name.replace('\n', '') for name in class_names]
    return class_names, class_indices