import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
import torchvision.models as models
import timm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import matplotlib.pyplot as plt  # 新增导入
import torch.nn.functional as F 
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
import numpy as np
from torch.nn.functional import softmax
from test_method import test_model, save_test_report

# Configuration parameters
DATA_DIR = './datasets/single_model'  # root folder holding train/val/test sub-directories

BATCH_SIZE = 8
EPOCHS = 20
LEARNING_RATE = 1e-4
IMG_SIZE = 224  # side length (pixels) every slice is resized/padded to
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
NUM_CLASS=4  # number of target classes (CSV labels are 1..4)
# Load label data

# Dataset class
class MedicalDataset(Dataset):
    """Single-slice T2-axial MRI dataset.

    For each patient the axial slice with the largest ROI area is selected,
    cropped to the ROI bounding box (plus 10% context), resized to
    IMG_SIZE x IMG_SIZE while preserving aspect ratio, ROI-weighted, and
    percentile-normalized.  ``__getitem__`` returns the image tensor, the
    0-based class label, and the patient id.

    NOTE(review): ``self.scaler`` is stored but never fitted or applied
    anywhere in this class — presumably a leftover from a structured-data
    pathway; confirm before relying on it.
    """

    def __init__(self, phase, scaler=None):
        # phase selects the sub-directory: DATA_DIR/<phase>/{label.csv,images,masks}
        self.data_dir = os.path.join(DATA_DIR, phase)

        df = pd.read_csv(os.path.join(self.data_dir, 'label.csv'))
        self.df = df.copy()
        # Keep only rows whose NIfTI image and mask both exist on disk.
        self.valid_indices = self._filter_valid_samples()
        self.df = self.df.iloc[self.valid_indices].reset_index(drop=True)
        # CSV labels are 1-based; shift to 0-based for CrossEntropyLoss.
        self.labels = (self.df['label'] - 1).tolist()
        self.phase = phase
        # Structured-data standardization (scaler is currently unused).
        if scaler is None:
            self.scaler = StandardScaler()
        else:
            self.scaler = scaler

    def _filter_valid_samples(self):
        # Return positional indices of rows whose image+mask files all exist.
        valid_indices = []
        for idx in range(len(self.df)):
            p_id = str(self.df.iloc[idx]['p_id'])
            try:
                self._validate_file_existence(p_id)
                valid_indices.append(idx)
            except FileNotFoundError as e:
                print(f"忽略无效样本 p_ID: {p_id} - {str(e)}")
        return valid_indices

    def _validate_file_existence(self, p_id):
        # Raise FileNotFoundError if either the image or the ROI mask is missing.
        img_path = os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
        roi_path = os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")
        if not all(map(os.path.exists, [img_path, roi_path])):
            raise FileNotFoundError(f"Missing files for p_ID: {p_id}")

    def load_nifti(self, p_id):
        """Load, crop, resize, and normalize the best slice for one patient.

        Returns a float32 array of shape (IMG_SIZE, IMG_SIZE), roughly in [0, 1].
        """
        p_id = str(p_id)
        img = nib.load(os.path.join(self.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")).get_fdata().astype(np.float32)
        roi = nib.load(os.path.join(self.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")).get_fdata().astype(np.uint8)
        
        # Pick the slice whose ROI covers the most pixels.
        roi_sums = roi.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)
        
        # Extract that slice
        img_slice = img[..., slice_idx]
        roi_slice = roi[..., slice_idx]
        
        # Improved ROI processing pipeline
        # 1. ROI bounding box (falls back to the full image when the ROI is empty)
        rows = np.any(roi_slice, axis=1)
        cols = np.any(roi_slice, axis=0)
        ymin, ymax = np.where(rows)[0][[0, -1]] if rows.any() else (0, img_slice.shape[0])
        xmin, xmax = np.where(cols)[0][[0, -1]] if cols.any() else (0, img_slice.shape[1])

        # 2. Expand the box by 10% per side for extra context, clamped to the image
        height = ymax - ymin
        width = xmax - xmin
        ymin = max(0, int(ymin - 0.1 * height))
        ymax = min(img_slice.shape[0], int(ymax + 0.1 * height))
        xmin = max(0, int(xmin - 0.1 * width))
        xmax = min(img_slice.shape[1], int(xmax + 0.1 * width))

        # 3. Crop to the (expanded) ROI region
        # NOTE(review): a degenerate ROI could leave h or w at 0 and divide by
        # zero in step 4 — assumed not to occur in this data; verify.
        cropped_img = img_slice[ymin:ymax, xmin:xmax]
        cropped_roi = roi_slice[ymin:ymax, xmin:xmax]

        # 4. Aspect-ratio-preserving rescale
        h, w = cropped_img.shape
        scale_ratio = min(IMG_SIZE/h, IMG_SIZE/w)
        new_h, new_w = int(h * scale_ratio), int(w * scale_ratio)
        
        # Bilinear interpolation for the image
        scaled_img = cv2.resize(cropped_img, (new_w, new_h), 
                               interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbour interpolation keeps the mask binary
        scaled_roi = cv2.resize(cropped_roi.astype(np.uint8), (new_w, new_h),
                               interpolation=cv2.INTER_NEAREST)

        # 5. Pad to the target size (background filled with a low percentile)
        pad_top = (IMG_SIZE - new_h) // 2
        pad_bottom = IMG_SIZE - new_h - pad_top
        pad_left = (IMG_SIZE - new_w) // 2
        pad_right = IMG_SIZE - new_w - pad_left
        
        padded_img = cv2.copyMakeBorder(scaled_img, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT, 
                                       value=np.percentile(scaled_img, 5))
        padded_roi = cv2.copyMakeBorder(scaled_roi, pad_top, pad_bottom,
                                       pad_left, pad_right, cv2.BORDER_CONSTANT,
                                       value=0)

        # 6. ROI-weighted enhancement
        roi_weighted = padded_img * (0.5 + 0.5 * (padded_roi > 0))  # boost ROI pixels by 50%
        
        # 7. Percentile normalization (statistics taken from ROI pixels only)
        roi_pixels = roi_weighted[padded_roi > 0]
        if len(roi_pixels) > 0:
            p2, p98 = np.percentile(roi_pixels, (2, 98))
            roi_weighted = np.clip((roi_weighted - p2) / (p98 - p2 + 1e-8), 0, 1)
        return roi_weighted.astype(np.float32)
    def __len__(self):
        # Number of valid (file-backed) samples.
        return len(self.df)

    def __getitem__(self, idx):
        # Returns a dict: 1-channel float image, 0-based label, patient id.
        row = self.df.iloc[idx]
        proc_img = self.load_nifti(row['p_id'])
        return {
        'image': torch.FloatTensor(proc_img).unsqueeze(0),
        'label': row['label'] - 1,
        'p_id': row['p_id']
        }

# Squeeze-and-Excitation attention module
class SEBlock(nn.Module):
    """Channel attention: global average pool → bottleneck MLP → sigmoid gate.

    NOTE(review): the ``reduction`` argument is overwritten in ``__init__``,
    so the constructor parameter is effectively ignored; the bottleneck
    ratio is derived from ``channels`` alone (kept as-is so saved
    checkpoints keep matching weight shapes).
    """

    def __init__(self, channels, reduction=16):
        super().__init__()
        # Auto-adjust the ratio to avoid over-compressing small channel counts.
        reduction = max(channels // 16, 4)
        hidden = channels // reduction
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, channels),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, chans = x.shape[:2]
        # Squeeze: per-channel global descriptor.
        gate = self.avgpool(x).view(batch, chans)
        # Excite: per-channel gate in (0, 1), broadcast over H x W.
        gate = self.fc(gate).view(batch, chans, 1, 1)
        return x * gate.expand_as(x)


class FusionModel(nn.Module):
    """Hybrid CNN–Transformer classifier for 1-channel MRI slices.

    Pipeline: ResNet18 backbone with a 1-channel stem → 1x1 conv to 768
    channels → pretrained ViT-Base transformer blocks over the token grid →
    small SE-augmented CNN head → linear classifier with ``num_classes``
    outputs.

    NOTE(review): the 49-token positional embedding assumes the ResNet stage
    emits a 7x7 map, i.e. a 224x224 input — confirm IMG_SIZE stays 224 if
    this model is reused elsewhere.
    """

    def __init__(self, num_classes):
        super().__init__()
        # Stage 1: ResNet feature extraction
        original_resnet = models.resnet18(pretrained=True)
        
        # Replace the first conv: pretrained stem expects 3 channels, use 1-channel input
        self.resnet = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False),  # modified input channels (new stem is randomly initialized)
            *list(original_resnet.children())[1:-2]  # keep subsequent layers unchanged (drops avgpool and fc)
        )
        # Gradient check: ensure the whole backbone is trainable
        for param in self.resnet.parameters():
            param.requires_grad = True
        # Stage 2: ViT feature enhancement
        self.vit_adapter = nn.Conv2d(512, 768, kernel_size=1)  # channel-count adapter
        self.vit_blocks = timm.create_model('vit_base_patch16_224', pretrained=True).blocks  # use only the Transformer blocks
        
        # Stage 3: fine-grained CNN feature extraction (input channels adjusted)
        self.cnn = nn.Sequential(
            nn.Conv2d(768, 32, 3, padding=1),  # input channels changed to 768
            nn.ReLU(),
            nn.Dropout2d(0.3), 
            SEBlock(32),
            nn.MaxPool2d(2),
            
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.Dropout2d(0.3),
            SEBlock(64),
            nn.MaxPool2d(2),
            
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Dropout(0.4),
            nn.Linear(64, 256)
        )

        # Structured-data fusion head (currently image features only)
        self.classifier = nn.Sequential(
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes)
        )

        # Positional encoding (must match the feature-map size)
        self.pos_drop = nn.Dropout(p=0.1)
        self.pos_embed = nn.Parameter(torch.zeros(1, 49, 768))  # assumes the resnet outputs a 7x7 feature map

    def forward(self, x):
        # x: dict with key 'image' holding a [B, 1, H, W] tensor.
        img = x['image']
        
        # ResNet feature extraction
        #resnet_feat = self.resnet(img.repeat(1, 3, 1, 1))  # [B, 512, 7, 7]
        resnet_feat = self.resnet(img)
        # Channel adaptation
        vit_feat = self.vit_adapter(resnet_feat)  # [B, 768, 7, 7]
        
        # Convert to ViT input format (token sequence)
        B, C, H, W = vit_feat.shape
        vit_feat = vit_feat.flatten(2).permute(0, 2, 1)  # [B, 49, 768]
        vit_feat = vit_feat + self.pos_embed
        vit_feat = self.pos_drop(vit_feat) 

        # ViT feature enhancement
        for blk in self.vit_blocks:
            vit_feat = blk(vit_feat)
        vit_feat = F.dropout(vit_feat, 0.2, training=self.training)
        # Convert back to CNN input format
        vit_feat = vit_feat.permute(0, 2, 1).view(B, C, H, W)  # [B, 768, 7, 7]
        
        # Fine-grained CNN processing
        cnn_feat = self.cnn(vit_feat)
        
        # Feature fusion (no structured features are concatenated yet)
        combined = cnn_feat
        return self.classifier(combined)

def apply_gradcam(model, target_layer, input_tensor, roi_mask):
    """Compute a Grad-CAM heatmap for the model's predicted class.

    Args:
        model: network taking ``{'image': tensor}`` and returning class logits.
        target_layer: module whose activations/gradients drive the CAM.
        input_tensor: image batch of shape [1, C, H, W] on the model's device.
        roi_mask: unused; kept for interface compatibility with callers.

    Returns:
        (cam, pred_class): normalized heatmap (numpy array at the target
        layer's spatial resolution) and the predicted class index.
    """
    # Capture activations and gradients via hooks.
    features = []
    grads = []

    def forward_hook(module, inputs, output):
        features.append(output.detach())

    def backward_hook(module, grad_input, grad_output):
        grads.append(grad_output[0].detach())

    forward_handle = target_layer.register_forward_hook(forward_hook)
    # register_backward_hook is deprecated and can report incorrect gradients
    # for modules with multiple inputs/outputs; use the full variant.
    backward_handle = target_layer.register_full_backward_hook(backward_hook)

    try:
        # Forward pass
        output = model({'image': input_tensor})
        pred_class = output.argmax(dim=1).item()

        # Backward pass w.r.t. the predicted-class score
        model.zero_grad()
        output[0, pred_class].backward()

        # Weight each feature map by its spatially-averaged gradient, combine,
        # clamp to positive influence, and min-max normalize to [0, 1].
        weights = torch.mean(grads[-1], dim=(2, 3), keepdim=True)
        cam = torch.sum(weights * features[-1], dim=1, keepdim=True)
        cam = F.relu(cam)
        cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
    finally:
        # Always remove hooks, even if the forward/backward pass raises.
        forward_handle.remove()
        backward_handle.remove()

    return cam.squeeze().cpu().numpy(), pred_class

def load_train_val_test_data():
    """Build the train/val/test datasets and their DataLoaders.

    The training loader uses a WeightedRandomSampler (inverse class
    frequency, with replacement) so minority classes are oversampled;
    evaluation loaders iterate in a fixed order.

    Returns:
        (val_dataset, train_loader, val_loader, test_loader)
    """
    train_dataset = MedicalDataset(phase='train')
    # Share the training scaler with val/test (currently unfitted/unused
    # inside MedicalDataset, but the wiring is preserved).
    val_dataset = MedicalDataset(phase='val', scaler=train_dataset.scaler)
    test_dataset = MedicalDataset(phase='test', scaler=train_dataset.scaler)

    labels = train_dataset.labels
    class_counts = np.bincount(labels)
    print(f"训练集类别分布: {class_counts}")

    # Per-sample weight = inverse frequency of the sample's class.
    class_weights = 1. / torch.tensor(class_counts, dtype=torch.float)
    sample_weights = class_weights[labels]

    # Draw one epoch's worth of samples (with replacement) per pass.
    # (An earlier max_count * num_classes scheme was dead code — it was
    # immediately overwritten; removed.)
    num_samples = len(train_dataset)
    sampler = WeightedRandomSampler(
        weights=sample_weights,
        num_samples=num_samples,
        replacement=True
    )

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, sampler=sampler, pin_memory=True)
    # Evaluation loaders need no shuffling; fixed order keeps runs reproducible.
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, pin_memory=True)

    def show_samples(dataset, num_samples=5):
        """Plot a few distinct random samples for a visual sanity check."""
        plt.figure(figsize=(15, 8))
        # replace=False avoids showing the same sample twice.
        indices = np.random.choice(len(dataset), num_samples, replace=False)
        for i, idx in enumerate(indices):
            sample = dataset[idx]
            plt.subplot(1, num_samples, i+1)
            plt.imshow(sample['image'].squeeze().numpy(), cmap='gray')  # drop the channel dim
            plt.title(f"p_ID: {sample['p_id']}\nLabel: {sample['label']+1}")  # restore the 1-based label
            plt.axis('off')
        plt.tight_layout()
        plt.show()

    # show_samples(train_dataset)  # preview training samples
    return val_dataset, train_loader, val_loader, test_loader

def train_and_evaluate(val_dataset, train_loader, val_loader):
    """Train a FusionModel and checkpoint the weights with the best val accuracy."""
    model = FusionModel(num_classes=4).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=0.01)

    best_val_acc = 0.0
    for epoch in range(EPOCHS):
        # ---- training pass ----
        model.train()
        train_loss = 0.0
        for batch in tqdm(train_loader, desc=f'Epoch {epoch+1}'):
            images = batch['image'].to(device)
            targets = batch['label'].to(device)

            logits = model({'image': images})
            loss = criterion(logits, targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        # ---- validation pass ----
        model.eval()
        val_loss = 0.0
        correct = 0
        with torch.no_grad():
            for batch in val_loader:
                images = batch['image'].to(device)
                targets = batch['label'].to(device)
                logits = model({'image': images})
                val_loss += criterion(logits, targets).item()
                correct += (logits.argmax(1) == targets).sum().item()
        val_acc = correct / len(val_dataset)

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), 'best_model_resnet_3.pth')
            print(f"Saved new best model with val acc {val_acc:.4f}")

        print(f"Epoch {epoch+1}/{EPOCHS} | "
            f"Train Loss: {train_loss/len(train_loader):.4f} | "
            f"Val Loss: {val_loss/len(val_loader):.4f} | "
            f"Val Acc: {correct/len(val_dataset):.4f}")

# def test_model(test_loader, device):
#     # 加载最佳模型
#     model = FusionModel(NUM_CLASS).to(device)
#     model.load_state_dict(torch.load('best_model_resnet_3.pth'))
#     model.eval()
    
#     all_probs = []
#     all_labels = []
    
#     with torch.no_grad():
#         for batch in test_loader:
#             inputs = {
#                 'image': batch['image'].to(device),
#             }
#             labels = batch['label'].cpu().numpy()
#             outputs = model(inputs)
#             probs = softmax(outputs, dim=1).cpu().numpy()
#             all_probs.extend(probs)
#             all_labels.extend(labels)

#     # 转换为numpy数组
#     y_true = np.array(all_labels)
#     y_score = np.array(all_probs)

#     # 二值化标签（适用于多分类）
#     y_true_bin = label_binarize(y_true, classes=np.arange(NUM_CLASS))

#     # 计算每个类别的ROC曲线和AUC
#     fpr = dict()
#     tpr = dict()
#     roc_auc = dict()
    
#     for i in range(NUM_CLASS):
#         fpr[i], tpr[i], _ = roc_curve(y_true_bin[:, i], y_score[:, i])
#         roc_auc[i] = auc(fpr[i], tpr[i])

#     # 计算微观平均AUC
#     fpr["micro"], tpr["micro"], _ = roc_curve(y_true_bin.ravel(), y_score.ravel())
#     roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

#     # 绘制ROC曲线
#     plt.figure(figsize=(10, 8))
#     colors = ['blue', 'red', 'green', 'orange', 'purple']
    
#     # 绘制每个类别
#     for i, color in zip(range(NUM_CLASS), colors):
#         plt.plot(fpr[i], tpr[i], color=color, lw=2,
#                  label=f'Class {i} (AUC = {roc_auc[i]:.2f})')

#     # 绘制平均曲线
#     plt.plot(fpr["micro"], tpr["micro"], color='deeppink', linestyle=':', linewidth=4,
#              label=f'Micro-average (AUC = {roc_auc["micro"]:.2f})')

#     # 绘制随机猜测线
#     plt.plot([0, 1], [0, 1], 'k--', lw=2)
#     plt.xlim([0.0, 1.0])
#     plt.ylim([0.0, 1.05])
#     plt.xlabel('False Positive Rate')
#     plt.ylabel('True Positive Rate')
#     plt.title('Multi-class ROC Curve')
#     plt.legend(loc="lower right")
#     plt.show()

#     print("AUC Values:")
#     for i in range(NUM_CLASS):
#         print(f"Class {i}: {roc_auc[i]:.4f}")
#     print(f"Micro-average AUC: {roc_auc['micro']:.4f}")

#     model.train()  # 需要梯度信息
#     target_layer = model.cnn[4]  # 选择目标层
#     return 0

if __name__ == "__main__":
    val_dataset, train_loader, val_loader, test_loader = load_train_val_test_data()
    # train_and_evaluate(val_dataset, train_loader, val_loader)
    model = FusionModel(num_classes=4).to(device)

    # map_location lets a GPU-trained checkpoint load on CPU-only machines.
    model.load_state_dict(torch.load('best_model_resnet_3.pth', map_location=device))

    # Evaluate on the test set
    test_results = test_model(model, test_loader, device, num_classes=4)

    # Generate the test report
    save_test_report(test_results, num_classes=4)
