import matplotlib.pyplot as plt
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor, AutoTokenizer
from image_pixcel_handler import image_pixel_values
from PIL import ImageOps
import math
import json
import os
from math import ceil, sqrt
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import cv2


class heatmap_tools():
    """Static helpers that turn LLM attention over image tokens into
    heatmap overlays and save them as PNG grids (one grid per frame)."""

    # ==== 2) Infer the frame count T (only used to split image-token segments) ====
    @staticmethod
    def infer_T(_pixel_values, _image_sizes):
        """Best-effort guess of the number of frames/images T.

        Prefers an explicit T dimension in ``_image_sizes`` ([B,T,2] tensor,
        [T,2] tensor, or a list of [H,W] pairs), then falls back to
        ``_pixel_values`` ([B,T,C,H,W] carries T at dim 1); defaults to 1.
        """
        if _image_sizes is not None:
            if isinstance(_image_sizes, torch.Tensor):
                if _image_sizes.dim() == 3:  # [B,T,2]
                    return int(_image_sizes.shape[1])
                if _image_sizes.dim() == 2:  # [T,2]
                    return int(_image_sizes.shape[0])
            elif isinstance(_image_sizes, (list, tuple)) and len(_image_sizes) > 0:
                if isinstance(_image_sizes[0], (list, tuple)):
                    return len(_image_sizes)
        if isinstance(_pixel_values, torch.Tensor):
            if _pixel_values.dim() == 5:  # [B,T,C,H,W]
                return int(_pixel_values.shape[1])
            if _pixel_values.dim() in (3, 4):  # [C,H,W] / [B,C,H,W] / [T,C,H,W]
                return 1
        return 1

    # ==== image-token mask -> contiguous True runs ====
    @staticmethod
    def get_segments_from_mask(mask_1d: torch.Tensor):
        """Given a [S] bool mask, return the contiguous True runs as
        [(s, e), ...] with inclusive endpoints."""
        m = mask_1d.to(torch.bool)
        if m.numel() == 0:
            return []
        mb = m.to(torch.uint8)
        # Zero-pad both ends so runs touching the borders still produce
        # a +1 (start) and a -1 (end) edge in the difference signal.
        pad = F.pad(mb, (1, 1), value=0)
        diff = pad[1:] - pad[:-1]
        starts = torch.nonzero(diff == 1, as_tuple=False).squeeze(1)
        ends = torch.nonzero(diff == -1, as_tuple=False).squeeze(1) - 1
        runs = [(int(s.item()), int(e.item())) for s, e in zip(starts, ends)]
        return runs

    @staticmethod
    def build_special_mask(input_ids, cfg_path=None):
        """Mark tokens whose ids are flagged "special" in a tokenizer-config JSON.

        Reads ``added_tokens_decoder`` from the JSON at ``cfg_path`` and returns
        a bool mask over ``input_ids``. With no path, or on any read/parse
        error, the result is an all-False mask (best-effort behaviour,
        preserved from the original implementation which relied on
        ``open(None)`` raising).
        """
        ids = []
        if cfg_path is not None:
            try:
                # Context manager so the file handle is always closed
                # (the original left the handle to the garbage collector).
                with open(cfg_path, "r", encoding="utf-8") as fh:
                    cfg = json.load(fh)
                ids = [int(k) for k, v in cfg.get("added_tokens_decoder", {}).items()
                       if v.get("special", False)]
            except Exception:
                ids = []
        if not ids:
            return torch.zeros_like(input_ids, dtype=torch.bool)
        return torch.isin(input_ids,
                          torch.tensor(sorted(set(ids)), device=input_ids.device))

    # ==== Convert pixel_values into List[np.ndarray(H, W, 3)] (RGB, uint8) ====
    @staticmethod
    def _pixel_values_to_rgb_images(pixel_values):
        """
        Convert pixel_values into a list of RGB uint8 images.

        WARNING: despite the generic branches below, the first reshape
        hard-codes the Qwen patch layout (256 patches x [3,14,28]) and then
        averages the patches into ONE pseudo-image — so this currently
        requires an input with exactly 256*3*14*28 elements.
        TODO(review): derive (B, C, H, W) from the input instead.
        """
        # Patch geometry assumed by the hard-coded reshape below.
        C = 3
        H = 14
        W = 28

        # Fix: convert numpy input to torch BEFORE reshape/mean — the original
        # called .mean(dim=0) (a torch-only signature) on the raw input, so
        # numpy arrays crashed with TypeError.
        if isinstance(pixel_values, np.ndarray):
            pixel_values = torch.from_numpy(pixel_values)

        # Reshape to [B, C, H, W] (B=256 is the assumed patch/sample count),
        # then collapse the patch dimension by averaging into one image.
        pixel_values = pixel_values.reshape(256, C, H, W)
        t = pixel_values.mean(dim=0, keepdim=True)
        t = t.detach().to(dtype=torch.float32, device="cpu")

        # Normalize the shape to [N,C,H,W]
        if t.ndim == 5:  # [B,T,C,H,W] -> [B*T,C,H,W] (unreachable after the reshape above; kept for future generalization)
            B, T, C, H, W = t.shape
            t = t.reshape(B * T, C, H, W)
        elif t.ndim == 4:  # [B,C,H,W] or [T,C,H,W]
            pass
        elif t.ndim == 3:  # [C,H,W] -> [1,C,H,W]
            C, H, W = t.shape
            t = t.reshape(1, C, H, W)
        else:
            raise ValueError(f"Unsupported pixel_values shape: {tuple(t.shape)}")

        # [N,H,W,C]
        arr = t.permute(0, 2, 3, 1).contiguous()  # float32

        # Per-image min-max scaling to [0,255] (avoids an all-black image
        # when the raw value range is tiny or offset).
        vmin = arr.amin(dim=(1, 2, 3), keepdim=True)
        vmax = arr.amax(dim=(1, 2, 3), keepdim=True)
        denom = (vmax - vmin).clamp_min(1e-6)
        arr = (arr - vmin) / denom * 255.0
        arr = arr.clamp(0, 255).to(torch.uint8).numpy()  # uint8

        imgs = [arr[i] for i in range(arr.shape[0])]  # RGB
        return imgs

    # ========== Save heatmaps: all layers of one frame are merged into a single grid image, each tile captioned with its layer index ==========
    @staticmethod
    def save_text_vision_heatmap_images(
            pixel_values,
            heatmaps,
            out_dir: Optional[Union[str, Path]] = None,
            file_prefix: str = "eagle_llm_attn",
            alpha: float = 0.5,
    ):
        """
        Overlay every layer's heatmap on the source image and save one grid
        image per frame (all layers of that frame tiled together).

        ``compute_text_vision_heatmaps`` is assumed to order ``heatmaps`` as
            [frame0_layer0, ..., frame0_layerL-1, frame1_layer0, ...]

        Args:
            pixel_values: same tensor the model consumed (torch.Tensor or np.ndarray).
            heatmaps    : torch.Tensor/np.ndarray shaped [T_total,Hg,Wg] or [Hg,Wg].
            out_dir     : output directory; falls back to the current directory
                          when None (previously ``Path(None)`` raised TypeError).
            file_prefix : output file name prefix.
            alpha       : overlay opacity in [0, 1].
        """
        # Robustness fix: the declared default out_dir=None used to crash.
        out_dir_effective = Path(out_dir) if out_dir is not None else Path(".")
        os.makedirs(out_dir_effective, exist_ok=True)

        # Normalize heatmaps to [T_total,Hg,Wg]
        if isinstance(heatmaps, np.ndarray):
            h = torch.from_numpy(heatmaps)
        else:
            h = heatmaps
        h = h.detach().cpu().float()
        if h.dim() == 2:
            h = h.unsqueeze(0)
        elif h.dim() != 3:
            raise ValueError(f"[tvattn] heatmaps 形状不支持: {tuple(h.shape)}（期望 [T,H,W] 或 [H,W]）")

        # Source frames as RGB uint8 images
        imgs = heatmap_tools._pixel_values_to_rgb_images(pixel_values)  # List[np.uint8(H,W,3)]

        num_frames = len(imgs)
        T_total = h.shape[0]
        if num_frames == 0 or T_total == 0:
            return

        # Layers per frame (integer division; a ragged last frame just takes what is left)
        layers_per_frame = max(1, T_total // num_frames)

        # Timestamp (UTC+8) embedded in the output file names.
        ts = datetime.now(timezone(timedelta(hours=8))).strftime("%Y%m%d_%H%M%S")

        for i in range(num_frames):
            img_rgb = imgs[i]  # (H,W,3), RGB, uint8
            H, W = img_rgb.shape[:2]
            img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)

            # All layer heatmaps belonging to this frame
            start = i * layers_per_frame
            end = min((i + 1) * layers_per_frame, T_total)
            this_layers = h[start:end]  # [L_i, Hg, Wg]
            L_i = this_layers.shape[0]

            tiles = []
            for l in range(L_i):
                heat = this_layers[l].numpy()  # (Hg,Wg), float
                # Normalize to [0,1]
                hmin, hmax = float(heat.min()), float(heat.max())
                denom = (hmax - hmin) if (hmax - hmin) > 1e-6 else 1e-6
                heat01 = (heat - hmin) / denom

                # Resize up to the source-image size
                heat_resized = cv2.resize(heat01, (W, H), interpolation=cv2.INTER_CUBIC)
                heat_u8 = (heat_resized * 255.0).clip(0, 255).astype(np.uint8)

                # Colorize and blend onto the frame
                heat_color_bgr = cv2.applyColorMap(heat_u8, cv2.COLORMAP_JET)
                overlay_bgr = cv2.addWeighted(img_bgr, 1.0 - float(alpha), heat_color_bgr, float(alpha), 0.0)

                # Side-by-side "original | overlay" tile (easier to read)
                side_bgr = np.concatenate([img_bgr, overlay_bgr], axis=1)  # [H, 2W, 3]

                # White caption strip at the bottom carrying the layer index
                caption_h = max(28, H // 20)
                caption = np.full((caption_h, side_bgr.shape[1], 3), 255, dtype=np.uint8)
                label = f"Layer {l}"
                cv2.putText(caption, label, (12, caption_h - 8), cv2.FONT_HERSHEY_SIMPLEX,
                            0.7, (0, 0, 0), 2, lineType=cv2.LINE_AA)

                tile = np.concatenate([side_bgr, caption], axis=0)  # [H+cap, 2W, 3]
                tiles.append(tile)

            # Near-square grid layout
            cols = int(ceil(sqrt(L_i)))
            rows = int(ceil(L_i / cols))

            # Scale every tile to a fixed 640px width to bound the output size
            # (renamed from `t`, which shadowed the tensor `t` above).
            thumb_w = 640
            scaled_tiles = []
            for tile_img in tiles:
                h_t, w_t = tile_img.shape[:2]
                scale = thumb_w / float(w_t)
                t_resz = cv2.resize(tile_img, (thumb_w, max(1, int(h_t * scale))), interpolation=cv2.INTER_AREA)
                scaled_tiles.append(t_resz)

            # Pad with blanks to fill the grid
            tile_h = max(st.shape[0] for st in scaled_tiles)
            tile_w = max(st.shape[1] for st in scaled_tiles)
            blank = np.full((tile_h, tile_w, 3), 255, dtype=np.uint8)

            grid = []
            idx = 0
            for r in range(rows):
                row_imgs = []
                for c in range(cols):
                    if idx < len(scaled_tiles):
                        st = scaled_tiles[idx]
                        # Paste centered onto a standard-size tile
                        pad = blank.copy()
                        y0 = (tile_h - st.shape[0]) // 2
                        x0 = (tile_w - st.shape[1]) // 2
                        pad[y0:y0 + st.shape[0], x0:x0 + st.shape[1]] = st
                        row_imgs.append(pad)
                        idx += 1
                    else:
                        row_imgs.append(blank.copy())
                grid.append(np.concatenate(row_imgs, axis=1))
            big_img = np.concatenate(grid, axis=0)

            out_path = os.path.join(out_dir_effective, f"{file_prefix}_{ts}_frame{i + 1}_L{L_i}.png")
            cv2.imwrite(out_path, big_img)
            print(f"[vis_attn] saved: {out_path}")


# @torch.no_grad()
def compute_text_vision_heatmaps(
        input_ids: torch.LongTensor,  # [B, N]
        pixel_values: torch.FloatTensor,  # only used to infer the frame count / for visualization; not part of the attention math
        image_sizes: Optional[torch.Tensor],  # [[H,W], ...] or [B,T,2]; may be None
        image_token_index,  # bool mask [S] / [B,S]; NOTE(review): the originally advertised int-start fallback is NOT implemented — any non-bool input raises below
        input_embeds: Optional[torch.Tensor],  # [B, S, D]; fed directly into the language model. NOTE(review): despite Optional, None crashes at .device below
        attns: torch.FloatTensor,  # actually a list/tuple of per-layer [B, heads, S, S] tensors (see the isinstance check below)
        out_dir: Optional[Union[str, Path]] = None,
):
    """
    Aggregate text->vision attention into one heatmap per (frame, layer).

    Returns:
        heatmaps:      torch.FloatTensor [T_total, Hg_max, Wg_max]
                    # NOTE: T_total = frame count (T) * effective layer count (L_eff);
                    # ordered as: all layers of frame0, then all layers of frame1, ...
        global_scores: torch.FloatTensor [T_total]          # per-tile (frame x layer) mean of the normalized attention vec01 (0~1)
        attn_matrix:   torch.FloatTensor [T_total, P_max]   # per-tile flattened attention (0~1), zero-padded on the right to P_max
    """

    device = input_embeds.device
    B, S, _ = input_embeds.shape

    # `attns` is expected to be the tuple produced by output_attentions=True.
    if not isinstance(attns, (list, tuple)) or len(attns) == 0:
        print("attentions Tensor is none")
        return torch.empty(0, 1, 1), torch.empty(0), torch.empty(0, 1)

    # Infer how many images were fed in (for Eagle, T_expected = 3).
    T_expected = heatmap_tools.infer_T(pixel_values, image_sizes)

    per_b_segments = []
    text_starts = []
    TEXT_START = 0

    if isinstance(image_token_index, torch.Tensor) and image_token_index.dtype == torch.bool:
        # image_token_index is a boolean mask over the sequence
        img_mask = image_token_index
        if img_mask.dim() == 1:
            img_mask = img_mask.unsqueeze(0).expand(B, -1)  # [B,S]
        else:
            assert img_mask.shape == (B, S), f"image_token_index 形状 {tuple(img_mask.shape)} 与 [B,S]=[{B},{S}] 不匹配"

        for b in range(B):

            # Contiguous True runs = one segment per image's token span
            runs = heatmap_tools.get_segments_from_mask(img_mask[b])

            # NOTE(review): this takes the START of the last image segment as
            # the text start; runs[-1][1] + 1 (just past the last image token)
            # seems more plausible — confirm. Overridden by the 786 constant below anyway.
            text_starts.append(int(runs[-1][0]) if runs else S)
            if T_expected > 0 and len(runs) > T_expected:
                # Keep only the T_expected longest runs, restored to positional order
                runs = sorted(sorted(runs, key=lambda x: (x[1] - x[0] + 1), reverse=True)[:T_expected],
                              key=lambda x: x[0])
            per_b_segments.append(runs)
        TEXT_START = min(text_starts) if text_starts else S
    else:
        raise ValueError("image_token_index 必须是 bool mask([S]/[B,S]) ")

    TEXT_START = 786  # hard-coded text-query start: tokens [786, S) are treated as text, per the Eagle text-input layout (overrides the value computed above)

    # Remove extra special tokens declared in the tokenizer config.
    # NOTE(review): build_special_mask is called WITHOUT cfg_path, so
    # special_mask is always all-False here and this step is a no-op.
    text_mask = torch.zeros((B, S), dtype=torch.bool, device=device)
    if TEXT_START < S:
        text_mask[:, TEXT_START:S] = True
    special_mask = heatmap_tools.build_special_mask(input_ids)
    text_mask = text_mask & (~special_mask)  # [B,S]
    has_text = text_mask.any(dim=-1)  # [B]

    # ==== Iterate over every layer x every frame segment to build heatmaps ====
    heatmaps_list = []
    scores_list = []
    vec_list = []
    Hg_max = Wg_max = P_max = 1

    for layer_idx, att in enumerate(attns):
        # Average over heads -> [B,S,S]
        attn_mean = att.mean(dim=1)

        # Pre-compute head_avg per batch item (attention averaged over the text queries)
        head_avg_list = []
        for b in range(B):
            if has_text[b]:

                count = text_mask[b].sum().clamp(min=1)
                head_avg_b = (attn_mean[b] * text_mask[b].to(attn_mean.dtype).unsqueeze(-1)).sum(dim=0) / count
            else:
                q_idx = S - 1  # no text queries: fall back to the final token as the query
                head_avg_b = attn_mean[b, q_idx, :]
            head_avg_list.append(head_avg_b)
        head_avg = torch.stack(head_avg_list, dim=0)  # [B,S]

        # For this layer, build one heatmap per frame segment
        for b in range(B):
            for (s0, e0) in per_b_segments[b]:
                s = max(0, min(s0, S - 1))
                e = max(0, min(e0, S - 1))
                if e < s:
                    continue

                vec = head_avg[b, s:e + 1]  # [Pi]
                Pi = int(vec.numel())
                if Pi == 0:
                    continue

                # Normalize to [0,1]
                vmin, vmax = vec.min(), vec.max()
                denom = (vmax - vmin).clamp(min=1e-6)
                vec01 = (vec - vmin) / denom

                # Near-square grid for rendering
                Hg = int(math.sqrt(Pi)) or 1
                Wg = int(math.ceil(Pi / max(Hg, 1)))
                P = Hg * Wg

                if P > Pi:
                    pad = torch.zeros(P - Pi, dtype=vec01.dtype, device=vec01.device)
                    vec01_pad = torch.cat([vec01, pad], dim=0)
                else:
                    vec01_pad = vec01

                grid = vec01_pad.view(Hg, Wg).detach().cpu().float()
                heatmaps_list.append(grid)
                scores_list.append(vec01.mean().item())
                vec_list.append(vec01.detach().cpu().float())

                Hg_max = max(Hg_max, Hg)
                Wg_max = max(Wg_max, Wg)
                P_max = max(P_max, Pi)

    if len(heatmaps_list) == 0:
        return torch.empty(0, 1, 1), torch.empty(0), torch.empty(0, 1)

    # Pad every grid to the common size and stack
    padded_grids = []
    for g in heatmaps_list:
        pad_h = Hg_max - g.shape[0]
        pad_w = Wg_max - g.shape[1]
        g_pad = F.pad(g, (0, pad_w, 0, pad_h), value=0.0)
        padded_grids.append(g_pad)
    heatmaps = torch.stack(padded_grids, dim=0)  # [T_total, Hg_max, Wg_max]

    padded_vecs = []
    for v in vec_list:
        if v.numel() < P_max:
            pad = torch.zeros(P_max - v.numel(), dtype=v.dtype)
            v = torch.cat([v, pad], dim=0)
        padded_vecs.append(v)
    attn_matrix = torch.stack(padded_vecs, dim=0)  # [T_total, P_max]
    global_scores = torch.tensor(scores_list, dtype=torch.float32)  # [T_total]

    # Side effect: render and save overlay images for every (frame, layer)
    heatmap_tools.save_text_vision_heatmap_images(
        pixel_values=pixel_values,
        heatmaps=heatmaps,
        out_dir=out_dir,
        file_prefix="eagle_llm_attn",
        alpha=0.4,  # overlay opacity
    )

    return heatmaps, global_scores, attn_matrix


# # 继承原始视觉编码器类，重写forward方法
# class VisionTransformerWithoutMerger(Qwen2_5_VisionTransformerPretrainedModel):
#     def forward(self, pixel_values, **kwargs):
#         grid_thw = kwargs["grid_thw"]
#         hidden_states = self.patch_embed(pixel_values)
#         # print("hidden_states",hidden_states.shape)
#         rotary_pos_emb = self.rot_pos_emb(grid_thw)
#         # print("grid_thw",grid_thw)
#         window_index, cu_window_seqlens = self.get_window_index(grid_thw)
#         cu_window_seqlens = torch.tensor(
#             cu_window_seqlens,
#             device=hidden_states.device,
#             dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
#         )
#         cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
# 
#         seq_len, _ = hidden_states.size()
#         hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
#         hidden_states = hidden_states[window_index, :, :]
#         hidden_states = hidden_states.reshape(seq_len, -1)
#         rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
#         rotary_pos_emb = rotary_pos_emb[window_index, :, :]
#         rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
#         emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
#         position_embeddings = (emb.cos(), emb.sin())
# 
#         cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
#             dim=0,
#             dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
#         )
#         cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
# 
#         for layer_num, blk in enumerate(self.blocks):
#             if layer_num in self.fullatt_block_indexes:
#                 cu_seqlens_now = cu_seqlens
#             else:
#                 cu_seqlens_now = cu_window_seqlens
#             if self.gradient_checkpointing and self.training:
#                 hidden_states = self._gradient_checkpointing_func(
#                     blk.__call__, hidden_states, cu_seqlens_now, None, position_embeddings
#                 )
#             else:
#                 hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings)
#         return hidden_states
# 
# 
# # 使用修改后的视觉编码器替换模型中的原始组件
# def modify_model_to_skip_merger(original_model):
#     # 1. 确认原始模型的visual配置
#     config = original_model.visual.config
#     # 2. 创建自定义视觉编码器实例
#     new_visual = VisionTransformerWithoutMerger(config)
#     # 3. 复制原始权重（排除merger相关参数）
#     original_state_dict = original_model.visual.state_dict()
#     # 过滤掉merger的权重（key含"merger"的参数）
#     filtered_state_dict = {k: v for k, v in original_state_dict.items() if "merger" not in k}
#     # 加载过滤后的权重（strict=False避免缺失merger参数报错）
#     new_visual.load_state_dict(filtered_state_dict, strict=False)
#     # 4. 强制替换原始visual（处理可能的属性绑定）
#     # 方式1：直接赋值（优先尝试）
#     original_model.visual = new_visual
#     # 方式2：若方式1无效，用__dict__强制修改（突破@property限制）
#     original_model.__dict__["visual"] = new_visual
#     # print("original_model.visual",original_model.visual)
#     # # 5. 验证替换结果
#     # assert type(original_model.visual) == VisionTransformerWithoutMerger, "替换失败！"
#     # print("模型visual组件替换成功！")
#     return original_model


# Use fonts with CJK glyph coverage so Chinese labels render in matplotlib.
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]


class Qwen2VLAttentionVisualizer:
    """Loads a Qwen2.5-VL checkpoint and visualizes text->vision attention."""

    def __init__(self, model_path):
        """Initialize the Qwen2.5-VL visualization tool.

        Args:
            model_path: local path (or hub id) of the checkpoint.
        """
        # Load with the Vision2Seq model class — better suited to multimodal tasks
        self.model = AutoModelForVision2Seq.from_pretrained(
            model_path,
            trust_remote_code=True,
            device_map="auto",
            torch_dtype=torch.float16  # use float16 to save GPU memory
        )
        # NOTE(review): device_map/torch_dtype are model-loading kwargs; a
        # tokenizer does not use them — looks like copy-paste, confirm and drop.
        self.tokenizer =  AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            device_map="auto",
            torch_dtype=torch.float16  # use float16 to save GPU memory
        )
        self.processor = AutoProcessor.from_pretrained(
            model_path,
            trust_remote_code=True
        )
        self.model.eval()
        # Image patch size (intended for heatmap conversion); never assigned in this class
        self.patch_size = None
        # Outputs collected by _hook_attn_weights when hooks are registered
        self.attn_outputs = []

    def _hook_attn_weights(self, module, input, output):
        """Forward hook: stash the attention-projection output for later visualization."""
        self.attn_outputs.append(output)  # `output` is the output of attn.proj

    def predict_and_visualize(self, image_path, text="描述这张图片", layer_idx=0, head_idx=0):
        """Run one forward pass and visualize the attention weights.

        Args:
            image_path: path to the input image.
            text: chat-formatted prompt (the default is Chinese for "describe this image").
            layer_idx: layer to visualize — only used by the commented-out code below.
            head_idx: attention head to visualize — likewise currently unused.

        NOTE(review): returns None — the __main__ caller prints None as the result.
        """
        # Load and center-crop the image to 224x224
        image = Image.open(image_path).convert('RGB')
        image = ImageOps.fit(image, (224, 224))
        # Manual image preprocessing (project helper; also returns the grid layout)
        pv, _grid_thw = image_pixel_values(self.model, self.processor, image)
        # print(pv.shape)
        img_w, img_h = image.size
        # Preprocessing: Qwen2.5-VL needs a specific image+text format using the <image> marker
        # Inputs are built like: [{'image': image, 'text': text}]
        # Automatic image processing via the processor
        inputs = self.processor(
            text=[text],
            # images=[image,image],
            images=[image],
            return_tensors="pt",
            padding=True
        ).to(self.model.device, dtype=torch.float16)
        # Inference
        with torch.no_grad():
            outputs = self.model(**inputs,
                                 return_dict=True,
                                 output_attentions=True  # record attention weights
                                 )
            # Get the input embeddings (HuggingFace-style)
            input_embeds = self.model.get_input_embeddings()(inputs["input_ids"])  # text embeddings

            input_ids = inputs["input_ids"]
            # Get the token id of the image placeholder
            # image_token_id = self.tokenizer.convert_tokens_to_ids("<|vision_start|>")
            image_token_id = self.tokenizer.convert_tokens_to_ids("<|image_pad|>")
            # image_token_id = self.tokenizer.convert_tokens_to_ids("<|vision_end|>")
            print(image_token_id,"image_token_id")
            print("input_ids",input_ids)
            # Prompt layout: <|vision_start|><|image_pad|><|vision_end|>
            # Locate the image placeholder positions inside input_ids
            image_pos = input_ids == image_token_id
            # # Assume the number of image patch tokens is N (depends on the vision tower, e.g. 256 or 1024)
            print("image_pos",image_pos.shape)
            N = 256  # example: Qwen-VL may split the image into 256 patches (unused by the active code)
            # image_token_indices = list(range(image_pos, image_pos + 1 + N))  # would include <image> plus N patch tokens
            # # Build a boolean mask (same length as input_ids)
            # image_token_index = torch.zeros_like(input_ids, dtype=torch.bool)
            # image_token_index[image_token_indices] = True

            attns = outputs.attentions
            tv_heatmaps, tv_global_score, tv_matrix = compute_text_vision_heatmaps(
                input_ids=input_ids,
                pixel_values=inputs["pixel_values"],
                image_sizes=[[224, 224], [224, 224], [224, 224]],  # helps infer the frame count T
                image_token_index=image_pos,  # bool mask of the image tokens
                input_embeds=input_embeds,
                attns=attns,
                out_dir="out_dir",
            )
            #
            # # from transformers.cache_utils import DynamicCache
            # """
            # # 以下的所有输出都根生成的文字有关，没有图像相关的数据
            # 1 torch.Size([1, 249, 152064])
            # 2 <transformers.cache_utils.DynamicCache object at 0x757188996810>
            # 3 torch.Size([1, 1])
            # 4 torch.Size([1, 28, 249, 249])
            # """
            # # print("1",outputs["logits"].shape)
            # # print("2",outputs["past_key_values"])
            # # print("3",outputs["rope_deltas"].shape)
            # # print("4", outputs["attentions"][0].shape)
            # # [28 * "torch.Size([1, 28, 181, 181])"]
            # # print(self.model)
            # # 生成回答（Qwen2.5-VL使用generate方法更合适）
            # generated_ids = self.model.generate(
            #     **inputs,
            #     max_new_tokens=100,
            #     output_attentions=True  # 记录注意力权重
            # )
            # # # 这部分是输出，直接根据关联关系给出答案的部分，也不包含图像激活信息部分。
            # predictions = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
            # print("模型输出",predictions)
            # pixel_values = inputs['pixel_values']
            # # pv = pv.to(self.model.device, dtype=torch.float16)
            # # _grid_thw = _grid_thw.to(self.model.device, dtype=torch.float16)
            # """
            #  - 判断形状是否相同：可以使用 `tensor1.shape == tensor2.shape`
            #  - 判断元素值是否完全相等：可以使用 `torch.equal(tensor1, tensor2)`
            #  - 判断元素值是否在误差范围内相等：可以使用 `torch.allclose(tensor1, tensor2, rtol, atol)`
            # """
            # # print(torch.equal(pv, pixel_values))  手动自动一致
            # # visual_outputs = self.model.visual(pixel_values, grid_thw=_grid_thw)
            # # 将模型参数进行替换
            # # self.model = modify_model_to_skip_merger(self.model)
            # # print(self.model.visual,"视觉部分")
            # # 为32个视觉层注册钩子
            # for i in range(0, 32):
            #     target_block_idx = i  # 0~31，最后一层选31
            #     self.model.visual.blocks[target_block_idx].attn.proj.register_forward_hook(self._hook_attn_weights)
            # # 直接调用视觉编码器
            # self.model.visual(pixel_values, grid_thw=_grid_thw)
            #
            # for i in range(0, 32):
            #     # print(self.attn_outputs)
            #     attn_output = self.attn_outputs[i]  # [batch=1, seq_len, hidden_dim=1280]
            #     # print(attn_output.shape)
            #     attn_output = torch.reshape(attn_output,(16,16,1280))
            #     # print(attn_output.shape)
            #     # 3. 生成热力图
            #     superimposed, grid_weights = feature_to_heatmap(
            #         features=attn_output,
            #         original_image=image
            #     )
            #     # 4. 可视化结果
            #     plt.figure(figsize=(12, 6))
            #     # 子图1：16×16的特征权重网格（原始特征）
            #     plt.subplot(131)
            #     plt.imshow(grid_weights, cmap='jet')
            #     plt.title("16×16特征权重网格")
            #     plt.axis('off')
            #     # 子图2：原始图像
            #     plt.subplot(132)
            #     plt.imshow(image)
            #     plt.title("原始图像（224×224）")
            #     plt.axis('off')
            #     # 子图3：叠加热力图
            #     plt.subplot(133)
            #     plt.imshow(superimposed)
            #     plt.title("特征热力图（224×224）")
            #     plt.axis('off')
            #     plt.tight_layout()
            #     plt.savefig("out_{}.jpg".format(i))

# def feature_to_heatmap(features, original_image, grid_size=(16, 16), patch_pixel_size=14):
#     """
#     将16×16×1280的特征转换为224×224的热力图
#
#     Args:
#         features: 特征张量，形状为(16, 16, 1280)或(1, 16, 16, 1280)
#         original_image: 原始PIL图像（224×224）
#         grid_size: 特征的空间维度，默认(16, 16)
#         patch_pixel_size: 每个特征格子对应的图像像素大小，默认14（224÷16=14）
#     Returns:
#         superimposed: 叠加热力图的图像（224×224×3）
#     """
#     # 1. 处理输入特征形状（确保是16×16×1280）
#     if len(features.shape) == 4:  # 若有batch维度，取第一个样本
#         features = features[0]
#     assert features.shape == (16, 16, 1280), f"特征形状错误，预期(16,16,1280)，实际{features.shape}"
#     features = features.cpu().numpy().astype(np.float32)
#     # 处理异常值并归一化（0~1）
#     features = np.nan_to_num(features, nan=0.0, posinf=0.0, neginf=0.0)
#     if np.max(features) - np.min(features) > 1e-8:
#         features = (features - np.min(features)) / (np.max(features) - np.min(features))
#     else:
#         features = np.zeros_like(features)  # 避免全零值导致的热力图异常
#     #
#     #
#     # features= features.cpu().numpy()
#     # # 2. 通道聚合：将1280维特征压缩为1维（多种方式可选）
#     # # 方式1：平均池化（简单有效，适合全局重要性）
#     # grid_weights = np.mean(features, axis=-1)  # 结果：(16, 16)
#     # # 方式2：最大值池化（突出强响应特征）
#     grid_weights = np.max(features, axis=-1)
#     # # 检查是否为2D数组
#     # if len(grid_weights.shape) != 2:
#     #     raise ValueError(f"grid_weights必须是2D数组，实际为{grid_weights.shape}")
#     #
#     # # 处理异常值（NaN/inf）
#     # if np.isnan(grid_weights).any() or np.isinf(grid_weights).any():
#     #     print("警告：检测到异常值，已替换为0")
#     #     grid_weights = np.nan_to_num(grid_weights, nan=0.0, posinf=0.0, neginf=0.0)
#     # # 转换数据类型为float32（OpenCV最兼容的类型）
#     # grid_weights = grid_weights.astype(np.float32)
#     #
#     # # 归一化到0~1（避免数值范围过大导致的问题）
#     # min_val = np.min(grid_weights)
#     # max_val = np.max(grid_weights)
#     # if max_val - min_val < 1e-8:  # 处理所有值相同的情况
#     #     grid_weights = np.zeros_like(grid_weights)
#     # else:
#     #     grid_weights = (grid_weights - min_val) / (max_val - min_val)
#
#     # 4. 空间放大：将16×16放大到224×224（每个格子放大14倍）
#     # 使用双线性插值（平滑过渡），适合热力图
#     heatmap = cv2.resize(
#         grid_weights,
#         dsize=(original_image.size[0], original_image.size[1]),  # (224, 224)
#         interpolation=cv2.INTER_LINEAR
#     )
#
#     # 5. 转换为彩色热力图（JET色图）
#     heatmap = np.uint8(255 * heatmap)  # 0~255
#     heatmap_color = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
#     heatmap_color = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB)  # 转换为RGB格式（匹配PIL）
#
#     # 6. 与原始图像叠加（调整透明度）
#     original_np = np.array(original_image)
#     superimposed = cv2.addWeighted(original_np, 0.7, heatmap_color, 0.3, 0)  # 原图60% + 热力图40%
#
#     return superimposed, grid_weights

# Usage example
if __name__ == "__main__":
    # Local checkpoint of Qwen2.5-VL-7B-Instruct (modelscope cache).
    checkpoint_dir = "/home/dengyunfei/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct"
    # Build the visualization tool around the checkpoint.
    viz = Qwen2VLAttentionVisualizer(checkpoint_dir)
    # Run a single prediction and dump the attention visualizations.
    answer = viz.predict_and_visualize(
        image_path="/home/dengyunfei/Desktop/123.png",
        text="""
        <|im_start|>system
        你是一个生物专家，能识别各种动物.<|im_end|>
<|im_start|>user
<|vision_start|><|image_pad|><|vision_end|>
请判断这个动物是什么？<|im_end|>
<|im_start|>assistant
        """,
        layer_idx=0,  # layer to visualize (0 = first layer)
        head_idx=0,  # attention head to visualize (0 = first head)
    )
    print("模型输出:", answer)
