import numpy as np
import cv2
import os
import nibabel as nib
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
import torch
import torch.nn.functional as F

def show_samples(dataset, num_samples=5):
    """Display a side-by-side comparison of original slices (with ROI contour)
    and the preprocessed/augmented images for randomly chosen dataset samples.

    Args:
        dataset: dataset object exposing __len__/__getitem__, a `data_dir`
            attribute, and samples with 'p_id', 'image' (tensor) and 'label'.
            NOTE(review): assumes on-disk layout data_dir/images/{p_id}_T2_axi_000.nii.gz
            and data_dir/masks/{p_id}_T2_axi_roi.nii.gz — confirm against the dataset class.
        num_samples: number of random samples to show (default 5).
    """
    # Two rows are populated (original+ROI, processed); size the figure accordingly.
    plt.figure(figsize=(15, 7))

    # replace=False so the same sample is never shown twice
    # (consistent with visualize_gradcam below).
    indices = np.random.choice(len(dataset), num_samples, replace=False)

    for i, idx in enumerate(indices):
        sample = dataset[idx]
        p_id = sample['p_id']

        # Paths to the raw image volume and its ROI mask.
        img_path = os.path.join(dataset.data_dir, "images", f"{p_id}_T2_axi_000.nii.gz")
        roi_path = os.path.join(dataset.data_dir, "masks", f"{p_id}_T2_axi_roi.nii.gz")

        # Load raw volumes.
        img = nib.load(img_path).get_fdata().astype(np.float32)
        roi = nib.load(roi_path).get_fdata().astype(np.uint8)

        # Pick the slice with the largest ROI area (matches the processing pipeline).
        roi_sums = roi.sum(axis=(0, 1))
        slice_idx = np.argmax(roi_sums)
        original_slice = img[..., slice_idx]
        roi_slice = roi[..., slice_idx]

        # Row 1: original slice with the ROI contour overlaid.
        plt.subplot(2, num_samples, i + 1)
        plt.imshow(original_slice, cmap='gray')
        # Draw ROI outline(s) in green; OpenCV contours are (x, y) point arrays.
        contours, _ = cv2.findContours(roi_slice, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            if cv2.contourArea(cnt) > 10:  # skip tiny noise blobs
                cnt = cnt.squeeze().astype(int)
                plt.plot(cnt[:, 0], cnt[:, 1], 'g-', linewidth=1)
        plt.title("Original with ROI", fontsize=8)
        plt.axis('off')

        # Row 2: the preprocessed/augmented image fed to the model.
        plt.subplot(2, num_samples, num_samples + i + 1)
        processed_img = sample['image'].squeeze().numpy()
        plt.imshow(processed_img, cmap='gray')
        plt.title(f"Processed\n(Label: {sample['label']+1})", fontsize=8)
        plt.axis('off')

    plt.tight_layout()
    plt.subplots_adjust(top=0.9)
    plt.suptitle("Data Augmentation Comparison\n(Rows: Original with ROI Overlay, Processed)", y=0.98)
    plt.show()

def plot_training_curves(metrics, save_path="./"):
    """绘制训练曲线并保存图片"""
    plt.figure(figsize=(12, 5))
    
    # 损失曲线
    plt.subplot(1, 2, 1)
    plt.plot(metrics['train_loss'], label='Train Loss', marker='o')
    plt.plot(metrics['val_loss'], label='Val Loss', marker='s')
    plt.title('Training/Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    
    # F1曲线
    plt.subplot(1, 2, 2)
    plt.plot(metrics['val_f1'], label='Val F1', color='green', marker='^')
    plt.title('Validation F1 Score')
    plt.xlabel('Epoch')
    plt.ylabel('F1 Score')
    plt.legend()
    plt.grid(True)
    
    # 调整布局并保存
    plt.tight_layout()
    plt.savefig(f"{save_path}/training_curves.png")
    plt.close()
    print(f"Training curves saved to {save_path}/training_curves.png")

def visualize_gradcam(model, test_loader, device, IMG_SIZE, num_samples=5):
    """Run Grad-CAM on random test samples and save overlay figures with ROI contours.

    Args:
        model: network with a `cross_fusion` module used as the CAM target layer;
            weights are loaded from 'best_model_ori.pth'.
        test_loader: DataLoader whose dataset yields {'image', 'p_id', 'label'}
            and exposes a `data_dir` attribute for locating raw NIfTI files.
        device: torch device for inference.
        IMG_SIZE: side length the CAM is resized to (must match processed image size).
        num_samples: number of random samples to visualize (default 5).

    Saves one PNG per sample to ./gradcam_figure/.
    """
    # Load best weights; map_location ensures the checkpoint loads even when it
    # was saved on a different device (e.g. GPU checkpoint on a CPU-only host).
    model.load_state_dict(torch.load('best_model_ori.pth', map_location=device))
    model.eval()

    # Target layer whose activations/gradients drive the CAM.
    target_layer = model.cross_fusion

    # Caches populated by the hooks on each forward/backward pass.
    features, grads = [], []

    def forward_hook(module, input, output):
        features.append(output.detach())

    def backward_hook(module, grad_input, grad_output):
        grads.append(grad_output[0].detach())

    # Register hooks; removed in the finally block below.
    forward_handle = target_layer.register_forward_hook(forward_hook)
    backward_handle = target_layer.register_full_backward_hook(backward_hook)

    # Ensure the output directory exists before any savefig call.
    os.makedirs('./gradcam_figure', exist_ok=True)

    try:
        # Pick distinct random test samples.
        dataset = test_loader.dataset
        indices = np.random.choice(len(dataset), num_samples, replace=False)

        for idx in indices:
            features.clear()
            grads.clear()

            sample = dataset[idx]
            img_tensor = sample['image'].unsqueeze(0).to(device)
            p_id = sample['p_id']
            true_label = sample['label'].item()

            # Forward pass.
            output = model({'image': img_tensor})
            pred_label = output.argmax().item()

            # Backward pass w.r.t. the predicted class score.
            model.zero_grad()
            output[:, pred_label].backward()

            # Skip if the hooks did not fire (target layer unused on this path).
            if len(features) == 0 or len(grads) == 0:
                continue

            # Grad-CAM: channel weights = spatially averaged gradients,
            # CAM = ReLU(weighted sum of feature maps), min-max normalized.
            weights = grads[0].mean(dim=(2, 3), keepdim=True)
            cam = (weights * features[0]).sum(1, keepdim=True)
            cam = F.relu(cam)
            cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
            cam = cam.squeeze().cpu().numpy()

            # Upsample CAM to image size and colorize (BGR->RGB for matplotlib).
            cam = cv2.resize(cam, (IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_CUBIC)
            heatmap = (cam * 255).astype(np.uint8)
            heatmap_colored = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
            heatmap_colored = cv2.cvtColor(heatmap_colored, cv2.COLOR_BGR2RGB) / 255.0

            # Load the raw image and ROI mask for this patient.
            mask_path = f"{dataset.data_dir}/masks/{p_id}_T2_axi_roi.nii.gz"
            mask_data = nib.load(mask_path).get_fdata()

            # Use the slice with the largest ROI area (aligned with preprocessing).
            slice_idx = np.argmax(mask_data.sum(axis=(0, 1)))
            original_slice = nib.load(
                f"{dataset.data_dir}/images/{p_id}_T2_axi_000.nii.gz"
            ).get_fdata()[..., slice_idx]
            mask_slice = mask_data[..., slice_idx].astype(np.uint8)  # binarize for contouring

            # Three-panel comparison figure.
            plt.figure(figsize=(18, 4))
            plt.suptitle(f"True Label: {true_label+1} | Predicted: {pred_label+1}")

            # Panel 1: original slice with ROI contour.
            plt.subplot(131)
            plt.imshow(original_slice, cmap='gray')
            plt.contour(mask_slice, levels=[0.5], colors='lime', linewidths=1.5, alpha=0.7)
            plt.title("Original Slice with ROI Contour"), plt.axis('off')

            # Panel 2: preprocessed image.
            processed_img = sample['image'].squeeze().numpy()
            plt.subplot(132), plt.imshow(processed_img, cmap='gray')
            plt.title("Processed Image"), plt.axis('off')

            # Panel 3: CAM heatmap overlaid on the processed image.
            plt.subplot(133)
            plt.imshow(processed_img, cmap='gray')
            plt.imshow(heatmap_colored, alpha=0.5)
            plt.title("Fusion Feature Activation"), plt.axis('off')

            plt.tight_layout()
            plt.savefig(f'./gradcam_figure/{p_id}_gradcam.png', bbox_inches='tight', dpi=300)
            plt.show()
    finally:
        # Always detach hooks, even if visualization of a sample raised.
        forward_handle.remove()
        backward_handle.remove()