from typing import Literal
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy.ndimage import zoom

from model.aynet import AYNet


# ==================== 可解释性分析模块 ====================

class InterpretabilityConfig:
    """Configuration switches for the interpretability features."""

    # Boolean feature flags managed as a group by enable_all/disable_all.
    _FLAGS = ('enable_cam', 'enable_feature_viz', 'enable_attention_viz', 'save_activations')

    def __init__(self):
        # Every feature starts disabled; callers opt in explicitly.
        for flag in self._FLAGS:
            setattr(self, flag, False)
        # Layers whose activations/gradients should be recorded by the hooks.
        self.target_layers = ['encoder_bottom', 'decoder_3', 'decoder_2', 'decoder_1', 'final_layer']

    def enable_all(self):
        """Turn on every interpretability feature."""
        for flag in self._FLAGS:
            setattr(self, flag, True)

    def disable_all(self):
        """Turn off every interpretability feature."""
        for flag in self._FLAGS:
            setattr(self, flag, False)


class CAMVisualizer:
    """Grad-CAM visualizer.

    Expects a model that, when built with ``enable_cam=True``, fills its
    ``activations`` and ``gradients`` dicts (keyed by layer name) through
    its own forward/backward hooks.
    """

    def __init__(self, model):
        self.model = model

    def generate_gradcam(self, input_tensor, target_layer='encoder_bottom'):
        """Compute a Grad-CAM map for ``target_layer``.

        Args:
            input_tensor: tuple ``(x, bfimg)`` — the model's two inputs.
            target_layer: key into the model's activation/gradient dicts.

        Returns:
            Tensor of shape (N, 1, h, w), ReLU-rectified and scaled to [0, 1].

        Raises:
            ValueError: if CAM is disabled or the layer was not recorded.
        """
        if not hasattr(self.model, 'enable_cam') or not self.model.enable_cam:
            raise ValueError("CAM功能未启用，请在模型初始化时设置enable_cam=True")

        # Forward pass; the model's hooks record per-layer activations.
        output = self.model(input_tensor[0], input_tensor[1])

        # Backward pass from a scalar surrogate target (mean of the output)
        # so the model's hooks can record per-layer gradients.
        self.model.zero_grad()
        loss = output.mean()
        loss.backward()

        activations = self.model.activations.get(target_layer)
        gradients = self.model.gradients.get(target_layer)

        if activations is None or gradients is None:
            raise ValueError(f"目标层 {target_layer} 的激活或梯度未找到")

        # Grad-CAM channel weights: global-average-pool the gradients.
        weights = torch.mean(gradients, dim=(2, 3), keepdim=True)

        # Weighted channel sum; ReLU keeps only positive influence.
        cam = torch.sum(weights * activations, dim=1, keepdim=True)
        cam = F.relu(cam)

        # Scale to [0, 1]; epsilon guards an all-zero map.
        cam = cam / (torch.max(cam) + 1e-8)

        return cam

    def visualize_cam(self, original_image, cam, alpha=0.4, colormap=None):
        """Overlay ``cam`` on ``original_image``.

        Args:
            original_image: tensor whose last two dims are (H, W).
            cam: Grad-CAM tensor from :meth:`generate_gradcam`.
            alpha: blending weight of the heat map.
            colormap: OpenCV colormap id; defaults to ``cv2.COLORMAP_JET``.
                Resolved lazily so merely importing this class no longer
                requires cv2 (the original evaluated it at class-definition
                time).

        Returns:
            Tuple ``(overlay, cam_resized)``: the blended RGB float image
            and the CAM resized to the image resolution.
        """
        if colormap is None:
            colormap = cv2.COLORMAP_JET

        # detach() guards against a CAM still attached to the autograd
        # graph — numpy() raises on tensors that require grad.
        cam_np = cam.detach().squeeze().cpu().numpy()
        h, w = original_image.shape[-2:]
        cam_resized = cv2.resize(cam_np, (w, h))

        # Color-map the CAM (OpenCV returns BGR; convert to RGB).
        cam_colored = cv2.applyColorMap(np.uint8(255 * cam_resized), colormap)
        cam_colored = cv2.cvtColor(cam_colored, cv2.COLOR_BGR2RGB)

        # Min-max normalize the original image; epsilon prevents the
        # division by zero the original hit on constant images.
        orig_np = original_image.detach().squeeze().cpu().numpy()
        orig_normalized = (orig_np - orig_np.min()) / (orig_np.max() - orig_np.min() + 1e-8)
        orig_rgb = np.stack([orig_normalized] * 3, axis=-1)

        # Alpha-blend heat map and grayscale image.
        overlay = alpha * cam_colored / 255.0 + (1 - alpha) * orig_rgb

        return overlay, cam_resized


class FeatureVisualizer:
    """Plots intermediate feature maps recorded by the model's hooks."""

    def __init__(self, model):
        self.model = model

    def plot_feature_maps(self, input_tensor, layer_name='encoder_bottom',
                         max_channels=16, figsize=(15, 10), save_path=None):
        """Plot up to ``max_channels`` feature maps of ``layer_name``.

        Args:
            input_tensor: tuple ``(x, bfimg)`` — the model's two inputs.
            layer_name: key into ``model.activations``.
            max_channels: cap on the number of channels shown.
            figsize: matplotlib figure size.
            save_path: optional path; when given the figure is also saved.

        Raises:
            ValueError: if CAM hooks are disabled or the layer is missing.
        """
        if not hasattr(self.model, 'enable_cam') or not self.model.enable_cam:
            raise ValueError("CAM功能未启用，无法访问激活。")

        # Forward pass only — activations don't need gradients here.
        with torch.no_grad():
            _ = self.model(input_tensor[0], input_tensor[1])

        activations = self.model.activations.get(layer_name)
        if activations is None:
            raise ValueError(f"层 {layer_name} 的激活未找到")

        channels = min(activations.shape[1], max_channels)
        cols = 4
        rows = (channels + cols - 1) // cols

        # squeeze=False keeps `axes` 2-D for every rows/cols combination,
        # so ravel() is always safe (replaces the original's fragile
        # rows==1 special-casing).
        fig, axes = plt.subplots(rows, cols, figsize=figsize, squeeze=False)
        axes = axes.ravel()

        for i in range(channels):
            feature_map = activations[0, i].cpu().numpy()
            axes[i].imshow(feature_map, cmap='viridis')
            axes[i].set_title(f'通道 {i}')
            axes[i].axis('off')

        # Hide unused subplots.
        for i in range(channels, len(axes)):
            axes[i].axis('off')

        plt.suptitle(f'{layer_name} 特征图')
        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.show()

    def compare_feature_evolution(self, input_tensor, layers=('down1', 'down2', 'down3', 'down4'), save_path=None):
        """Compare the first four channels of several layers side by side.

        Args:
            input_tensor: tuple ``(x, bfimg)`` — the model's two inputs.
            layers: layer names to look up in ``model.activations``
                (tuple default avoids the mutable-default-argument pitfall).
            save_path: optional path; when given the figure is also saved.
        """
        # Forward pass populates model.activations via its hooks.
        with torch.no_grad():
            _ = self.model(input_tensor[0], input_tensor[1])

        # squeeze=False guarantees 2-D indexing even when len(layers) == 1,
        # where the original's axes[row, col] raised.
        fig, axes = plt.subplots(len(layers), 4, figsize=(16, 4 * len(layers)), squeeze=False)

        for row, layer in enumerate(layers):
            if layer in self.model.activations:
                activations = self.model.activations[layer]
                for col in range(min(4, activations.shape[1])):
                    feature_map = activations[0, col].cpu().numpy()
                    axes[row, col].imshow(feature_map, cmap='viridis')
                    axes[row, col].set_title(f'{layer} - 通道 {col}')
                    axes[row, col].axis('off')

        plt.suptitle('特征演化对比')
        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.show()


class AttentionVisualizer:
    """Captures and plots the outputs of the model's attention blocks."""

    # Attention-block attribute names probed on the model.
    _ATT_NAMES = ('att1', 'att2', 'att3')

    def __init__(self, model):
        self.model = model
        self.attention_weights = {}
        # Handles are kept so hooks can be detached; the original dropped
        # them, leaking a hook per visualizer instance.
        self._hook_handles = []
        if hasattr(model, 'enable_cam') and model.enable_cam:
            self._register_attention_hooks()

    def _register_attention_hooks(self):
        """Attach a forward hook to every attention block on the model."""
        def save_attention(name):
            def hook(module, input, output):
                # NOTE(review): this records the attention-weighted OUTPUT
                # of the block, not the attention weights (psi) themselves —
                # confirm whether psi should be exposed by the block instead.
                self.attention_weights[name] = output.detach()
            return hook

        for name in self._ATT_NAMES:
            module = getattr(self.model, name, None)
            if module is not None:
                self._hook_handles.append(
                    module.register_forward_hook(save_attention(name)))

    def remove_hooks(self):
        """Detach every registered hook (safe to call more than once)."""
        for handle in self._hook_handles:
            handle.remove()
        self._hook_handles.clear()

    def visualize_attention_maps(self, input_tensor, figsize=(15, 5), save_path=None):
        """Plot the first channel of every captured attention map.

        Args:
            input_tensor: tuple ``(x, bfimg)`` — the model's two inputs.
            figsize: matplotlib figure size.
            save_path: optional path; when given the figure is also saved.
        """
        # Forward pass fills self.attention_weights via the hooks.
        with torch.no_grad():
            _ = self.model(input_tensor[0], input_tensor[1])

        available_layers = [name for name in self._ATT_NAMES
                            if name in self.attention_weights]

        if not available_layers:
            print("未找到注意力权重")
            return

        fig, axes = plt.subplots(1, len(available_layers), figsize=figsize)
        if len(available_layers) == 1:
            axes = [axes]

        for i, layer in enumerate(available_layers):
            # Show the first channel of the captured map.
            att_map = self.attention_weights[layer][0, 0].cpu().numpy()
            im = axes[i].imshow(att_map, cmap='hot', interpolation='nearest')
            axes[i].set_title(f'注意力 {layer}')
            axes[i].axis('off')
            plt.colorbar(im, ax=axes[i], fraction=0.046, pad=0.04)

        plt.suptitle('注意力图可视化')
        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.show()


# ==================== 可视化工具函数 ====================

def visualize_complete_analysis(model, x, bfimg, save_prefix=None):
    """
    Run the full interpretability pipeline: Grad-CAM, feature maps,
    attention maps and feature-evolution comparison. Each stage is wrapped
    in its own try/except so one failure does not abort the others.
    Args:
        model: AYNet model instance; must expose `enable_cam` and
               `enable_visualization`, and its hooks must fill the
               `activations`/`gradients` dicts the visualizers read
        x: raw data input tensor
        bfimg: beamformed image input tensor
        save_prefix: path prefix for the saved figures; None disables saving
    """
    if not model.enable_cam:
        print("警告: 模型未启用CAM功能，启用CAM...")
        model.enable_visualization(True)
    
    # One visualizer per analysis stage, all sharing the same model.
    cam_viz = CAMVisualizer(model)
    feature_viz = FeatureVisualizer(model)
    attention_viz = AttentionVisualizer(model)
    
    print("开始可视化分析...")
    
    try:
        # 1. Grad-CAM: four panels — raw input, beamformed image,
        #    raw CAM, and the CAM overlaid on the image.
        print("1. 生成Grad-CAM...")
        cam = cam_viz.generate_gradcam((x, bfimg), target_layer='encoder_bottom')
        overlay, cam_resized = cam_viz.visualize_cam(bfimg, cam)
        
        plt.figure(figsize=(15, 5))
        plt.subplot(1, 4, 1)
        plt.imshow(x[0, 0].cpu().numpy(), cmap='gray')
        plt.title('原始数据')
        plt.axis('off')
        
        plt.subplot(1, 4, 2)
        plt.imshow(bfimg[0, 0].cpu().numpy(), cmap='gray')
        plt.title('波束形成图像')
        plt.axis('off')
        
        plt.subplot(1, 4, 3)
        plt.imshow(cam_resized, cmap='hot')
        plt.title('Grad-CAM')
        plt.axis('off')
        
        plt.subplot(1, 4, 4)
        plt.imshow(overlay)
        plt.title('CAM叠加')
        plt.axis('off')
        
        plt.tight_layout()
        if save_prefix:
            plt.savefig(f'{save_prefix}_gradcam.png', dpi=300, bbox_inches='tight')
        plt.show()
        
    except Exception as e:
        print(f"Grad-CAM可视化错误: {e}")
    
    try:
        # 2. Feature maps of the bottleneck layer.
        print("2. 可视化特征图...")
        save_path = f'{save_prefix}_features.png' if save_prefix else None
        feature_viz.plot_feature_maps((x, bfimg), 'encoder_bottom', save_path=save_path)
        
    except Exception as e:
        print(f"特征图可视化错误: {e}")
    
    try:
        # 3. Attention maps captured from the attention blocks.
        print("3. 可视化注意力图...")
        save_path = f'{save_prefix}_attention.png' if save_prefix else None
        attention_viz.visualize_attention_maps((x, bfimg), save_path=save_path)
        
    except Exception as e:
        print(f"注意力图可视化错误: {e}")
    
    try:
        # 4. Feature evolution across the default encoder stages.
        print("4. 特征演化对比...")
        save_path = f'{save_prefix}_evolution.png' if save_prefix else None
        feature_viz.compare_feature_evolution((x, bfimg), save_path=save_path)
        
    except Exception as e:
        print(f"特征演化可视化错误: {e}")
    
    print("可视化分析完成!")


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Build the model with CAM hooks enabled.
    model_with_cam = AYNet(in_channels=1, up_mode='transpose', merge_mode='concat', enable_cam=True).to(device)

    # Configure interpretability features.
    config = InterpretabilityConfig()
    config.enable_all()
    print(f"可解释性配置: CAM={config.enable_cam}, 特征可视化={config.enable_feature_viz}, 注意力可视化={config.enable_attention_viz}")

    # Synthetic inputs — the original referenced `x` and `img` without ever
    # defining them, which raised a NameError before any analysis ran.
    # NOTE(review): 1x1x128x128 is a placeholder shape; confirm against the
    # input sizes AYNet actually expects.
    x = torch.randn(1, 1, 128, 128, device=device)
    img = torch.randn(1, 1, 128, 128, device=device)

    # Run the complete visualization analysis.
    try:
        print("开始可解释性分析...")
        visualize_complete_analysis(model_with_cam, x, img, save_prefix='aynet_analysis')
        print("可解释性分析完成！")
    except Exception as e:
        print(f"可解释性分析出错: {e}")
        print("这可能是由于缺少matplotlib或opencv等依赖包")
