import torch
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import cv2


class AttentionVisualizer:
    """Renders attention maps (single-map and multi-head) as PNG figures.

    Each figure is written into ``save_dir`` as a high-resolution PNG;
    the directory is created on construction if it does not exist.
    """

    def __init__(self, save_dir: Path):
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _normalize(arr: np.ndarray) -> np.ndarray:
        """Min-max scale *arr* into [0, 1]; epsilon guards constant inputs."""
        return (arr - arr.min()) / (arr.max() - arr.min() + 1e-8)

    @staticmethod
    def _safe_name(layer_name: str) -> str:
        """Sanitize a dotted/slashed layer name into a filename component."""
        return layer_name.replace('.', '_').replace('/', '_')

    def visualize_attention_weights(self, attention_weights: torch.Tensor,
                                  input_image: torch.Tensor, layer_name: str):
        """Plot original image, attention heatmap, and overlay side by side.

        Args:
            attention_weights: (B, C, H, W) tensor (averaged over C) or
                (B, H, W). Only the first batch element is visualized;
                any other rank is rejected with a message.
            input_image: (B, C, H, W) or (C, H, W); first batch element
                and first channel are shown as a grayscale image.
            layer_name: Used in the figure title and the output filename.
        """
        # detach() so tensors that still require grad can be converted
        # to numpy (hooks typically capture such tensors).
        if attention_weights.dim() == 4:  # (B, C, H, W)
            attention = attention_weights[0].mean(dim=0).detach().cpu().numpy()
        elif attention_weights.dim() == 3:  # (B, H, W)
            attention = attention_weights[0].detach().cpu().numpy()
        else:
            print(f"不支持的attention形状: {attention_weights.shape}")
            return

        attention = self._normalize(attention)

        # First batch element (and first channel, if present) as 2D image.
        if input_image.dim() == 4:  # (B, C, H, W)
            img = input_image[0, 0].detach().cpu().numpy()
        else:
            img = input_image[0].detach().cpu().numpy()

        fig, axes = plt.subplots(1, 3, figsize=(18, 6))
        fig.suptitle(f'Attention Visualization - {layer_name}', fontsize=16)

        # Panel 1: raw input image.
        axes[0].imshow(img, cmap='gray')
        axes[0].set_title('Original Image')
        axes[0].axis('off')

        # Panel 2: attention heatmap with its own colorbar.
        im1 = axes[1].imshow(attention, cmap='hot')
        axes[1].set_title('Attention Weights')
        axes[1].axis('off')
        plt.colorbar(im1, ax=axes[1])

        # Panel 3: overlay. Resize attention to the image resolution so
        # the two layers align pixel-for-pixel.
        if attention.shape != img.shape:
            # cv2.resize takes (width, height) — reversed numpy order.
            attention_resized = cv2.resize(attention, (img.shape[1], img.shape[0]))
        else:
            attention_resized = attention
        # Interpolation can push values slightly outside [0, 1]; clamp
        # before feeding the colormap.
        attention_resized = np.clip(attention_resized, 0.0, 1.0)

        overlay = self._normalize(img)

        # Map both layers through colormaps, then alpha-blend 60/40.
        colored_attention = plt.cm.hot(attention_resized)[:, :, :3]  # RGB
        overlay_colored = plt.cm.gray(overlay)[:, :, :3]  # RGB
        blended = 0.6 * overlay_colored + 0.4 * colored_attention

        axes[2].imshow(blended)
        axes[2].set_title('Attention Overlay')
        axes[2].axis('off')

        plt.tight_layout()
        save_path = self.save_dir / f'attention_{self._safe_name(layer_name)}.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"注意力可视化已保存: {save_path}")

    def visualize_multi_head_attention(self, attention_weights: torch.Tensor,
                                     input_image: torch.Tensor, layer_name: str):
        """Plot up to 8 attention heads on a 4-column grid.

        Args:
            attention_weights: (B, num_heads, H, W) tensor; only the first
                batch element is visualized. Any other rank is rejected.
            input_image: Unused; kept for interface symmetry with
                ``visualize_attention_weights``.
            layer_name: Used in the figure title and the output filename.
        """
        if attention_weights.dim() != 4:
            # NOTE: message previously said (B, H, H, W) — the code
            # actually expects (B, num_heads, H, W).
            print("多头注意力需要4D张量 (B, num_heads, H, W)")
            return

        num_heads = attention_weights.shape[1]
        attention = attention_weights[0].detach().cpu().numpy()  # first batch

        # Cap the grid at 8 heads, 4 per row.
        num_heads_to_show = min(num_heads, 8)
        cols = 4
        rows = (num_heads_to_show + cols - 1) // cols

        fig, axes = plt.subplots(rows, cols, figsize=(16, 4 * rows))
        fig.suptitle(f'Multi-Head Attention - {layer_name}', fontsize=16)

        # subplots() returns a 1D array when rows == 1; force 2D indexing.
        if rows == 1:
            axes = axes.reshape(1, -1)

        for i in range(num_heads_to_show):
            row, col = divmod(i, cols)
            head_attention = self._normalize(attention[i])
            im = axes[row, col].imshow(head_attention, cmap='hot')
            axes[row, col].set_title(f'Head {i}')
            axes[row, col].axis('off')
            plt.colorbar(im, ax=axes[row, col], fraction=0.046, pad=0.04)

        # Hide unused grid cells.
        for i in range(num_heads_to_show, rows * cols):
            row, col = divmod(i, cols)
            axes[row, col].axis('off')

        plt.tight_layout()
        save_path = self.save_dir / f'multi_head_attention_{self._safe_name(layer_name)}.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"多头注意力可视化已保存: {save_path}")
