# Imports
import os
import torch
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from torch import nn
from torchvision.models import resnet50
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
from model_effcientNet.dataset_SUVmax import MRIDataset
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import torchvision.transforms as transforms
import matplotlib
matplotlib.use('Agg')  # non-interactive backend (safe for headless runs)
import matplotlib.pyplot as plt
from matplotlib import cm
import cv2


class Backbone(nn.Module):
    """ResNet-50 feature extractor with the final fully-connected head removed."""

    def __init__(self):
        super().__init__()
        base = resnet50(pretrained=False)
        # Keep the first 9 children (conv stem, layer1-4, global avg pool),
        # dropping only the trailing classification fc layer.
        trunk = list(base.children())[:9]
        self.backbone = nn.Sequential(*trunk)

    def forward(self, x):
        """Run the truncated ResNet-50 trunk on a batch of images."""
        return self.backbone(x)

class ModalitySpecificNet(nn.Module):
    """ResNet-50 backbone (RadImageNet-pretrained) with a forward hook on the
    last conv layer of layer4, plus a utility that saves overlay images of the
    most strongly activated feature channels.
    """

    def __init__(self):
        super().__init__()
        self.backbone = Backbone()
        self.backbone.load_state_dict(torch.load("./pretrained_model/RadImageNet_pytorch/ResNet50.pt"))

        # Refreshed by the forward hook on every forward pass.
        self.feature_maps = None
        self.target_layer = self.get_target_layer()
        self.target_layer.register_forward_hook(self.save_feature_maps)

    def get_target_layer(self):
        """Return conv3 of the last bottleneck block in layer4."""
        resnet = self.backbone.backbone
        layer4 = resnet[7]  # children order: conv1, bn1, relu, maxpool, layer1-4, ...
        return layer4[2].conv3

    def save_feature_maps(self, module, input, output):
        # Detach so the cached activations don't keep the autograd graph alive.
        self.feature_maps = output.detach()

    def forward(self, imgs, rois, labels):
        """Return pooled backbone features.

        `rois` and `labels` are accepted for caller-interface compatibility
        but are not used by this forward pass.
        """
        feature = self.backbone(imgs).squeeze(-1).squeeze(-1)
        return feature

    def visualize_features(self, imgs, pids, save_dir="feature_visualization",
                           top_k=4, mean=0.5, std=0.5):
        """Save one PNG per sample showing the top-k activated feature channels.

        Args:
            imgs: batch tensor (B, C, H, W) normalized as (x - mean) / std;
                  assumed 3-channel — TODO confirm against the dataset.
            pids: per-sample identifiers used in output filenames.
            save_dir: output directory (created if missing).
            top_k: number of channels, ranked by mean activation, to plot.
            mean, std: de-normalization constants for display
                       (defaults match the previous hard-coded 0.5 / 0.5).
        """
        os.makedirs(save_dir, exist_ok=True)
        was_training = self.training  # remember mode instead of leaving eval() on
        self.eval()
        try:
            with torch.no_grad():
                rois = torch.zeros_like(imgs)  # dummy ROI, unused by forward
                labels = torch.zeros(len(imgs), dtype=torch.long)  # dummy labels
                _ = self(imgs, rois, labels)
        finally:
            self.train(was_training)

        if self.feature_maps is None:
            print("警告：未捕获到特征图!")
            return
        for i, pid in enumerate(pids):
            # [C, H, W] -> [H, W, C], then de-normalize for display.
            img_np = imgs[i].cpu().numpy().transpose(1, 2, 0)
            img_np = img_np * std + mean
            feats = self.feature_maps[i].cpu().numpy()
            # Rank channels by mean spatial activation.
            channel_importance = np.mean(feats, axis=(1, 2))
            top_indices = np.argsort(channel_importance)[-top_k:][::-1]

            fig, axes = plt.subplots(1, top_k + 1, figsize=(25, 7))
            fig.suptitle(f"PID: {pid} - Top {top_k} Feature Channels", fontsize=18, y=0.95)

            # Leftmost panel: one channel of the input image in grayscale.
            axes[0].imshow(img_np[..., 1], cmap='gray')
            axes[0].set_title("Original Image", fontsize=12)
            axes[0].axis('off')

            plt.subplots_adjust(wspace=0.15, hspace=0.05)
            for j, idx in enumerate(top_indices, 1):
                feat_map = feats[idx]
                feat_map = cv2.resize(feat_map, (224, 224), interpolation=cv2.INTER_CUBIC)
                # Min-max normalize to [0, 1]; epsilon guards constant maps.
                feat_map = (feat_map - feat_map.min()) / (feat_map.max() - feat_map.min() + 1e-8)
                feat_map = cv2.GaussianBlur(feat_map, (5, 5), 0)
                heatmap = cm.jet(feat_map)[..., :3]  # drop the alpha channel

                # Blend image and heatmap; clip so imshow gets valid RGB in [0, 1].
                overlay = np.clip(0.5 * img_np + 0.5 * heatmap, 0.0, 1.0)
                axes[j].imshow(overlay)
                axes[j].set_title(f"Channel {idx}\nAct: {channel_importance[idx]:.4f}", fontsize=12)
                axes[j].axis('off')

                if j == top_k:
                    # One shared colorbar for all heatmap panels.
                    cax = fig.add_axes([0.92, 0.15, 0.02, 0.7])
                    norm = plt.Normalize(vmin=0, vmax=1)
                    sm = plt.cm.ScalarMappable(cmap=cm.jet, norm=norm)
                    sm.set_array([])
                    cbar = fig.colorbar(sm, cax=cax)
                    cbar.set_label('Activation Intensity', fontsize=10)

            out_path = f"{save_dir}/PID_{pid}_features.png"
            # Save/close this specific figure (plt.savefig/plt.close target the
            # "current" figure, which is fragile inside a loop).
            fig.savefig(out_path, bbox_inches='tight', dpi=150)
            plt.close(fig)
            print(f"可视化结果已保存至: {out_path}")

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ModalitySpecificNet().to(device)
    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
        transforms.Normalize([0.5], [0.5]),
        transforms.Lambda(lambda x: x.squeeze(0)),
    ])
    
    train_dataset = MRIDataset(
        root_dir='D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/train',
        transform=data_transform
    )
    
    # 创建专门用于获取前100个样本的数据加载器
    subset_indices = list(range(min(100, len(train_dataset))))
    subset = torch.utils.data.Subset(train_dataset, subset_indices)
    subset_loader = DataLoader(subset, batch_size=4, shuffle=False)  # 使用固定批次大小，不随机打乱
    
    # 创建保存目录
    save_dir = "feature_visualization_100_samples_cor"
    os.makedirs(save_dir, exist_ok=True)
    
    # 处理前100个样本
    processed_count = 0
    with tqdm(total=len(subset), desc="Generating visualizations") as pbar:
        for batch_idx, (imgs, rois, labels, pids) in enumerate(subset_loader):
            # 确保不超过100个样本
            if processed_count >= 100:
                break
                
            # 处理当前批次
            model.visualize_features(imgs.to(device), pids, save_dir=save_dir)
            
            # 更新计数和进度条
            processed_count += len(imgs)
            pbar.update(len(imgs))
            
            print(f"已处理 {processed_count}/100 个样本")
    
    print(f"所有100个样本的特征可视化图已保存到 {save_dir} 目录")