#!/usr/bin/env python

# Copyright 2024 (Your Name or Organization) and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""_summary_
    兼容act和mt_act后
"""

"""
一个通用的可视化工具模块，用于生成注意力热力图。
这些函数被设计为可被任何策略调用。
"""

import math
import os
import traceback
from typing import Optional

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.widgets import CheckButtons
from torch import Tensor


def visualize_attention_single(
        original_image: np.ndarray,
        attn_weights_single_head_cam: torch.Tensor,
        feature_map_size: tuple[int, int],
        blend_alpha: float = 0.4
) -> np.ndarray:
    """
    Overlay a single attention map onto the original image as a heatmap.

    Args:
        original_image (np.ndarray): Original image in BGR format.
        attn_weights_single_head_cam (torch.Tensor): Flattened attention weights
            for a single camera and a single attention head; must contain
            exactly ``feature_map_size[0] * feature_map_size[1]`` elements.
        feature_map_size (tuple[int, int]): (height, width) of the feature map.
        blend_alpha (float): Opacity of the heatmap overlay.

    Returns:
        np.ndarray: BGR image (uint8) with the heatmap superimposed.
    """
    h_feat, w_feat = feature_map_size
    # detach().cpu() first so this also works for CUDA tensors and tensors
    # that require grad (bare .numpy() would raise in both cases).
    attention_map = attn_weights_single_head_cam.detach().cpu().reshape(h_feat, w_feat).numpy()

    h_orig, w_orig = original_image.shape[:2]

    # Upsample the attention map to the original image size (cubic = smooth).
    resized_attention_map = cv2.resize(attention_map, (w_orig, h_orig), interpolation=cv2.INTER_CUBIC)

    # Normalize to [0, 255] and apply the JET color map.
    norm_attention_map = cv2.normalize(resized_attention_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    heatmap = cv2.applyColorMap(norm_attention_map, cv2.COLORMAP_JET)

    output_image = original_image.astype(np.uint8)

    # Blend the heatmap with the original image.
    superimposed_img = cv2.addWeighted(heatmap, blend_alpha, output_image, 1 - blend_alpha, 0)

    return superimposed_img


def visualize_attention_maps(
        original_images: dict,
        attn_weights: Tensor,
        camera_keys: list[str],
        feature_map_size: tuple[int, int],
        step_counter: int,
        save_dir: str,
        queries_to_visualize: list[int],
        layer_idx: int = -1,
        batch_idx: int = 0,
        average_heads: bool = True,
        blend_alpha: float = 0.4,
        task_embedding: Optional[torch.Tensor] = None,
):
    """Save per-camera attention heatmaps for the requested action queries.

    For each query index in ``queries_to_visualize``, the attention of one
    layer / batch element is restricted to the visual tokens (assumed to sit
    at the tail of the key axis), split into per-camera segments, blended
    onto the matching original image and written as a JPEG under
    ``save_dir/<camera>/action_<idx>/``. Errors are caught and printed so a
    visualization failure never aborts inference.
    """
    print(f"\n--- Step {step_counter}: Generating inference attention heatmaps for actions {queries_to_visualize} ---")

    try:
        if not feature_map_size:
            print("[VIZ-WARN] Skipping visualization because feature_map_size could not be determined.")
            return

        # Slice out the requested layer and batch element: [heads, queries, keys].
        attn_all_queries = attn_weights.cpu().detach()[layer_idx, batch_idx]

        # Non-image token count = total keys minus the visual tokens
        # contributed by all cameras.
        area = feature_map_size[0] * feature_map_size[1]
        num_visual = len(camera_keys) * area
        num_non_image_tokens = attn_all_queries.shape[-1] - num_visual

        if num_non_image_tokens < 0:
            print(
                f"[VIZ-WARN] Token calculation resulted in a negative value for non-image tokens ({num_non_image_tokens}). Skipping visualization.")
            return

        for action_query_idx in queries_to_visualize:
            if action_query_idx >= attn_all_queries.shape[1]:
                print(f"    - Warning: Skipping action_query_idx {action_query_idx}, out of bounds.")
                continue

            # [heads, visual_keys] for this query only.
            visual_attn = attn_all_queries[:, action_query_idx, num_non_image_tokens:]

            # Either average across heads or take the first head verbatim.
            weights = visual_attn.mean(dim=0) if average_heads else visual_attn[0]

            for cam_idx, cam_key in enumerate(camera_keys):
                start = cam_idx * area
                end = start + area
                if end > weights.shape[0]:
                    continue

                # Render the heatmap overlay for this camera.
                overlay = visualize_attention_single(
                    original_images[cam_key], weights[start:end], feature_map_size, blend_alpha
                )

                # Build the output directory and write the image.
                action_dir = os.path.join(save_dir, cam_key.replace('.', '_'), f"action_{action_query_idx:03d}")
                os.makedirs(action_dir, exist_ok=True)
                cv2.imwrite(os.path.join(action_dir, f"step_{step_counter:04d}_attn.jpg"), overlay)

            print(f"    - Saved heatmaps for action_query={action_query_idx}")

    except Exception:
        print(f"\n--- Step {step_counter}: An unexpected error occurred during visualization ---")
        traceback.print_exc()

class RealTimeVisualizer:
    """Interactive matplotlib window that overlays attention heatmaps onto
    camera images in real time.

    A persistent figure is created lazily on the first ``update`` call: one
    axes per camera, plus a CheckButtons panel with one entry per action
    query. Clicking a checkbox (single-select, deselect allowed) switches the
    displayed images between the raw camera frames and attention overlays.
    """

    def __init__(
            self,
            camera_keys: list[str],
            queries_to_visualize: list[int],
            layer_idx: int,
            batch_idx: int,
            average_heads: bool,
            blend_alpha: float
    ):
        """Store configuration; the GUI itself is built on first update().

        Args:
            camera_keys: Batch-dict keys of the camera images to display.
            queries_to_visualize: Action-query indices offered as checkboxes.
            layer_idx: Index into dim 0 of the attention tensor (layer axis).
            batch_idx: Batch element to visualize.
            average_heads: If True, average attention over heads; else head 0.
            blend_alpha: Heatmap opacity used when blending onto the image.
        """
        self.camera_keys = camera_keys
        self.queries_to_visualize = queries_to_visualize
        self.layer_idx = layer_idx
        self.batch_idx = batch_idx
        self.average_heads = average_heads
        self.blend_alpha = blend_alpha

        # GUI state, populated by _setup_gui() on the first update() call.
        self.fig = None
        self.axes_map = {}
        self.image_artists = {}
        self.check_buttons = None
        self.labels_text = []

    def _setup_gui(self, initial_batch: dict):
        """Create and show the persistent GUI window (runs only on the first update call)."""
        print("--- [Visualizer] Creating persistent GUI window... ---")

        plt.rcParams['toolbar'] = 'none'
        num_cameras = len(self.camera_keys)
        max_cols = 2
        num_rows = math.ceil(num_cameras / max_cols)
        num_cols = min(num_cameras, max_cols)
        fig_width = 5 * num_cols + 2

        self.fig, axes_flat = plt.subplots(num_rows, num_cols, figsize=(fig_width, 5.5 * num_rows), squeeze=False)
        axes_flat = axes_flat.flatten()
        self.fig.suptitle("Real-time Attention Visualizer", fontsize=16)

        for i, cam_key in enumerate(self.camera_keys):
            ax = axes_flat[i]
            # NOTE(review): assumes image tensors are (C, H, W) floats in
            # [0, 1] — confirm against the caller's batch format.
            img_tensor = initial_batch[cam_key][0].cpu()
            img_np_rgb = (img_tensor.permute(1, 2, 0).numpy() * 255).astype(np.uint8)

            # Keep the Image artist so later updates can call set_data()
            # instead of redrawing the whole axes.
            self.image_artists[cam_key] = ax.imshow(img_np_rgb)

            ax.set_title(cam_key, fontsize=14)
            ax.set_xticks([])
            ax.set_yticks([])
            self.axes_map[cam_key] = ax

        # Hide any unused grid cells.
        for i in range(num_cameras, len(axes_flat)):
            axes_flat[i].axis('off')

        # --- Checkbox panel setup ---
        self.labels_text = [f"Action {q}" for q in self.queries_to_visualize]
        actives = [False] * len(self.labels_text)

        # Place the checkbox panel on the right-hand side of the figure.
        rax = self.fig.add_axes([0.9, 0.4, 0.08, 0.3])
        self.check_buttons = CheckButtons(rax, self.labels_text, actives)

        # Font and styling tweaks.
        for label in self.check_buttons.labels:
            label.set_fontsize(10)

        rects = [p for p in self.check_buttons.ax.patches if isinstance(p, plt.Rectangle)]
        if len(rects) == len(self.labels_text):
            for i, rect in enumerate(rects):
                rect.set_edgecolor("black")
                rect.set_linewidth(1.5)
                # Initial color: gray out unchecked boxes.
                if not actives[i]:
                    rect.set_facecolor("lightgray")

        # Register the click callback.
        self.check_buttons.on_clicked(self._on_check_clicked)

        plt.tight_layout(rect=[0, 0, 0.88, 0.95])
        plt.show(block=False)

        print("--- [Visualizer] GUI initialized. Waiting 2 seconds to ensure rendering... ---")
        plt.pause(2.0)
        print("--- [Visualizer] Start! ---")

    def _on_check_clicked(self, label):
        """Handle a checkbox click: enforce single-selection, but allow deselecting everything."""
        if not self.check_buttons:
            return

        # 1. Temporarily disable event dispatch so that toggling other
        #    checkboxes below does not re-enter this callback recursively.
        self.check_buttons.eventson = False

        try:
            # Index of the checkbox that was clicked.
            idx_clicked = self.labels_text.index(label)

            # Current checked states. Note: matplotlib has ALREADY toggled the
            # clicked box before invoking this callback — if it was False
            # before the click, status[idx_clicked] is True here.
            current_status = self.check_buttons.get_status()
            is_checked_now = current_status[idx_clicked]

            # 2. Single-selection logic.
            if is_checked_now:
                # Case A: the user checked a box — keep it True and turn every
                # other checked box off.
                for i, status in enumerate(current_status):
                    if i != idx_clicked and status:
                        self.check_buttons.set_active(i)  # set_active toggles, so True -> False
            else:
                # Case B: the user unchecked the current box.
                # Do nothing: it is now False and the others are already False.
                # This permits the "nothing selected" state (raw images shown).
                pass

            # 3. Refresh checkbox styling (selected = white/default, unselected
            #    = gray) for visual feedback.
            rects = [p for p in self.check_buttons.ax.patches if isinstance(p, plt.Rectangle)]
            new_status = self.check_buttons.get_status()
            if len(rects) == len(new_status):
                for i, rect in enumerate(rects):
                    if not new_status[i]:
                        rect.set_facecolor("lightgray")
                    else:
                        rect.set_facecolor("white")

        except Exception as e:
            print(f"[Visualizer Error] Button click failed: {e}")
        finally:
            # 4. Re-enable event dispatch.
            self.check_buttons.eventson = True

    @torch.no_grad()
    def update(self, attn_weights: Tensor, batch: dict, feature_map_size: tuple[int, int]):
        """Refresh the window with the current batch, overlaying attention for any checked query.

        Args:
            attn_weights: Attention tensor indexed as [layer, batch, head, query, key]
                by _preprocess_attn — TODO confirm layout against the policy.
            batch: Dict containing one image tensor per camera key.
            feature_map_size: (height, width) of the visual feature map.
        """
        if self.fig is None:
            self._setup_gui(batch)

        # Collect the query indices whose checkboxes are currently checked.
        active_queries = []
        if self.check_buttons:
            active_statuses = self.check_buttons.get_status()
            for i, status in enumerate(active_statuses):
                if status:
                    active_queries.append(self.queries_to_visualize[i])

        # Convert each camera frame to a BGR uint8 image (cv2 convention).
        current_images_bgr = {}
        for cam_key in self.camera_keys:
            img_tensor = batch[cam_key][0].cpu()
            img_np = (img_tensor.permute(1, 2, 0).numpy() * 255).astype(np.uint8)
            current_images_bgr[cam_key] = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)

        final_display_images = {}

        # With an active selection show heatmap overlays, otherwise raw images.
        if active_queries:
            preprocessed_attn = self._preprocess_attn(attn_weights, feature_map_size)
            for cam_key in self.camera_keys:
                if (active_queries[0], cam_key) in preprocessed_attn:
                    # Combine attention across selected queries via element-wise
                    # max (under single-select, active_queries usually has length 1).
                    combined_attn = torch.zeros_like(preprocessed_attn[(active_queries[0], cam_key)])
                    for query_idx in active_queries:
                        if (query_idx, cam_key) in preprocessed_attn:
                            attn_map = preprocessed_attn[(query_idx, cam_key)]
                            combined_attn = torch.max(combined_attn, attn_map)

                    heatmap_img_bgr = self._visualize_single(current_images_bgr[cam_key], combined_attn,
                                                             feature_map_size)
                    final_display_images[cam_key] = cv2.cvtColor(heatmap_img_bgr, cv2.COLOR_BGR2RGB)
                else:
                    final_display_images[cam_key] = cv2.cvtColor(current_images_bgr[cam_key], cv2.COLOR_BGR2RGB)
        else:
            # No selection: show the raw images.
            for cam_key in self.camera_keys:
                final_display_images[cam_key] = cv2.cvtColor(current_images_bgr[cam_key], cv2.COLOR_BGR2RGB)

        # Push the new pixel data into the existing Image artists.
        for cam_key in self.camera_keys:
            if cam_key in self.image_artists and cam_key in final_display_images:
                self.image_artists[cam_key].set_data(final_display_images[cam_key])

        # Force a redraw of the figure.
        plt.pause(0.001)

    def close(self):
        """Close the figure window and reset GUI state so a later update() recreates it."""
        if self.fig is not None:
            plt.close(self.fig)
            self.fig = None
            self.axes_map = {}
            self.image_artists = {}
            print("--- [Visualizer] GUI closed. ---")

    def _preprocess_attn(self, attn_weights, feature_map_size):
        """Slice attention weights into per-(query, camera) flat visual-token segments.

        Returns a dict keyed by (query_idx, camera_key); empty dict when the
        configured layer index is out of range or the token math is
        inconsistent with the feature map size. Visual tokens are assumed to
        occupy the tail of the key axis, one contiguous segment per camera.
        """
        preprocessed_attn = {}
        if self.layer_idx >= attn_weights.shape[0]: return {}
        attn_slice = attn_weights.cpu().detach()[self.layer_idx, self.batch_idx]
        feature_map_area = feature_map_size[0] * feature_map_size[1]
        num_visual_tokens = len(self.camera_keys) * feature_map_area
        total_tokens = attn_slice.shape[-1]
        num_non_image_tokens = total_tokens - num_visual_tokens
        if num_non_image_tokens < 0: return {}
        for query_idx in self.queries_to_visualize:
            if query_idx >= attn_slice.shape[1]: continue
            attn_for_query = attn_slice[:, query_idx, num_non_image_tokens:]
            weights = attn_for_query.mean(dim=0) if self.average_heads else attn_for_query[0]
            for i, cam_key in enumerate(self.camera_keys):
                start, end = i * feature_map_area, (i + 1) * feature_map_area
                if end <= weights.shape[0]:
                    preprocessed_attn[(query_idx, cam_key)] = weights[start:end]
        return preprocessed_attn

    def _visualize_single(self, original_image, attn_weights_flat, feature_map_size):
        """Blend one flat attention map onto a BGR image; returns the BGR overlay (uint8)."""
        h_feat, w_feat = feature_map_size
        attention_map = attn_weights_flat.reshape(h_feat, w_feat).cpu().numpy()
        h_orig, w_orig = original_image.shape[:2]
        resized_map = cv2.resize(attention_map, (w_orig, h_orig), interpolation=cv2.INTER_CUBIC)
        norm_map = cv2.normalize(resized_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        heatmap = cv2.applyColorMap(norm_map, cv2.COLORMAP_JET)
        return cv2.addWeighted(heatmap, self.blend_alpha, original_image.astype(np.uint8), 1 - self.blend_alpha, 0)