import os
import numpy as np
import pandas as pd
import nibabel as nib
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from skimage.transform import resize
from tqdm import tqdm
from matplotlib import pyplot as plt
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from skimage import exposure

# ================== Configuration ==================
config = {
    "data_root": "./datasets/single_model",
    "batch_size": 8,
    "num_epochs": 30,
    "learning_rate": 1e-4,
    "input_size": (16, 224, 224),  # target ROI size as (depth, height, width)
    "num_classes": 4,
    "seed": 42,
    "fixed_depth": 16  # fixed depth parameter — NOTE(review): not referenced elsewhere in this file
}

# ================== Random seeds ==================
# Seed torch and numpy for reproducible runs (DataLoader workers may still add nondeterminism).
torch.manual_seed(config["seed"])
np.random.seed(config["seed"])

# ================== Data loader ==================
import os
import numpy as np
import pandas as pd
import nibabel as nib
import torch
from torch.utils.data import Dataset
from skimage.transform import resize

class MedicalDataset(Dataset):
    """Dataset of single-modality T2 axial MRI volumes with tumour ROI crops.

    Each sample is the ROI bounding box (plus ~10% context margin) cropped
    from the image volume, resized to ``config["input_size"]`` and
    normalized. Labels come from ``<data_root>/<mode>/label.csv`` and are
    shifted from 1-based (CSV) to 0-based (training).
    """

    def __init__(self, mode="train"):
        """
        :param mode: dataset split ("train" / "val" / "test"); selects the
                     sub-directory under ``config["data_root"]``.
        """
        super().__init__()
        self.mode = mode
        self.data_root = config["data_root"]

        # Split-specific paths.
        self.img_dir = os.path.join(self.data_root, mode, "images")
        self.mask_dir = os.path.join(self.data_root, mode, "masks")
        self.label_path = os.path.join(self.data_root, mode, "label.csv")

        # Load the label table and drop samples without a usable mask.
        self.label_df = pd.read_csv(self.label_path)
        self._filter_invalid_samples()

        # Normalization method from config: "zscore" (default), "minmax",
        # or anything else -> 99th-percentile scaling.
        self.norm_method = config.get("normalization", "zscore")

    def __len__(self):
        return len(self.label_df)

    def _filter_invalid_samples(self):
        """Drop rows whose mask file is missing or contains no foreground."""
        valid_indices = []
        for idx in range(len(self.label_df)):
            p_id = self.label_df.iloc[idx]['p_id']
            mask_path = os.path.join(self.mask_dir, f"{p_id}_T2_axi_roi.nii.gz")
            if os.path.exists(mask_path):
                mask = self.load_nii(mask_path)
                if np.any(mask > 0):
                    valid_indices.append(idx)
            else:
                print(f"Warning: Mask file {mask_path} not found")
        self.label_df = self.label_df.iloc[valid_indices].reset_index(drop=True)

    def load_nii(self, path):
        """Load a NIfTI file, reorient to canonical RAS, return float32 array."""
        img = nib.load(path)
        img = nib.as_closest_canonical(img)  # standardize to RAS orientation
        return img.get_fdata().astype(np.float32)

    def _normalize(self, volume):
        """Normalize a volume according to ``self.norm_method``."""
        if self.norm_method == "zscore":
            mean, std = volume.mean(), volume.std()
            return (volume - mean) / (std + 1e-8)
        elif self.norm_method == "minmax":
            return (volume - volume.min()) / (volume.max() - volume.min() + 1e-8)
        else:  # raw values, scaled by the 99th percentile
            return volume / (np.percentile(volume, 99) + 1e-8)

    def __getitem__(self, idx):
        """Return ``(roi_tensor [1, D, H, W], label_tensor [1])`` for sample *idx*."""
        # CSV labels are 1-based -> convert to 0-based for CrossEntropyLoss.
        p_id = self.label_df.iloc[idx]['p_id']
        label = self.label_df.iloc[idx]['label'] - 1

        # Load image volume and its ROI mask.
        img_path = os.path.join(self.img_dir, f"{p_id}_T2_axi_000.nii.gz")
        mask_path = os.path.join(self.mask_dir, f"{p_id}_T2_axi_roi.nii.gz")
        img = self.load_nii(img_path)
        mask = self.load_nii(mask_path)

        # ROI bounding box; min/max are INCLUSIVE voxel indices.
        coords = np.where(mask > 0)
        z_min, z_max = np.min(coords[0]), np.max(coords[0])
        y_min, y_max = np.min(coords[1]), np.max(coords[1])
        x_min, x_max = np.min(coords[2]), np.max(coords[2])

        # Expand the box by ~10% context on each axis.
        depth_extension = int(0.1 * (z_max - z_min))
        height_extension = int(0.1 * (y_max - y_min))
        width_extension = int(0.1 * (x_max - x_min))

        # BUGFIX: the max indices are inclusive, so the exclusive slice end
        # needs +1 — previously the last ROI slice/row/column was dropped,
        # and a single-slice mask (extension rounds to 0) produced an empty
        # crop along that axis.
        z_start = max(0, z_min - depth_extension)
        z_end = min(img.shape[0], z_max + depth_extension + 1)
        y_start = max(0, y_min - height_extension)
        y_end = min(img.shape[1], y_max + height_extension + 1)
        x_start = max(0, x_min - width_extension)
        x_end = min(img.shape[2], x_max + width_extension + 1)

        roi = img[z_start:z_end, y_start:y_end, x_start:x_end]

        # Resize to the fixed (D, H, W) input size with linear interpolation.
        roi = resize(roi, config["input_size"],
                    order=1,  # bilinear interpolation
                    preserve_range=True)

        # Intensity normalization.
        roi = self._normalize(roi)

        # Tensor layout: [channels=1, depth, height, width].
        roi_tensor = torch.FloatTensor(roi).unsqueeze(0)
        label_tensor = torch.LongTensor([label])

        return roi_tensor, label_tensor

def get_dataloader(mode="train"):
    """Build a DataLoader for the given split; shuffle only when training."""
    is_train = mode == "train"
    return DataLoader(
        MedicalDataset(mode=mode),
        batch_size=config["batch_size"],
        shuffle=is_train,
        num_workers=4,
        pin_memory=True,
    )

# ================== 2D CNN model ==================
class CNN2D(nn.Module):
    """Slice-wise 2D CNN over a volumetric input.

    Every depth slice of the [B, 1, D, H, W] volume goes through a shared
    2D conv stack; the per-slice feature vectors are mean-pooled over the
    depth axis and classified by a small MLP head.
    """

    def __init__(self):
        super().__init__()
        # Three conv/BN/ReLU/pool stages; spatial size 224 -> 112 -> 56 -> 28.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),

            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),

            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )

        # MLP head over the flattened 128 x 28 x 28 feature map.
        self.classifier = nn.Sequential(
            nn.Linear(128 * 28 * 28, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, config["num_classes"])
        )

    def forward(self, x):
        """x: [batch, 1, depth, H, W] -> class logits [batch, num_classes]."""
        n, _, d, h, w = x.size()

        # Fold depth into the batch axis so all slices share the 2D stack.
        slices = x.permute(0, 2, 1, 3, 4).reshape(n * d, 1, h, w)

        # Per-slice feature vectors: [n*d, 128 * 28 * 28].
        feats = self.features(slices).flatten(1)

        # Restore the depth axis and average slice features per volume.
        pooled = feats.view(n, d, -1).mean(dim=1)

        return self.classifier(pooled)

# ================== Training ==================
def train_model():
    """Train CNN2D with Adam + cross-entropy, validating after each epoch.

    Checkpoints the weights with the best validation accuracy to
    ``best_model_2d_cnn.pth``. Returns the model carrying the FINAL-epoch
    weights (not necessarily the checkpointed best).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Model, loss, and optimizer.
    model = CNN2D().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"])

    # Data loaders for the train and validation splits.
    train_loader = get_dataloader("train")
    val_loader = get_dataloader("val")

    best_acc = 0.0
    for epoch in range(config["num_epochs"]):
        # ---- Training phase ----
        model.train()
        running_loss = 0.0
        with tqdm(train_loader, unit="batch") as tepoch:
            for inputs, labels in tepoch:
                tepoch.set_description(f"Epoch {epoch+1}")

                inputs = inputs.to(device)
                # Labels arrive as [batch, 1]; flatten to [batch] for the loss.
                labels = labels.to(device).view(-1)

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()
                tepoch.set_postfix(loss=loss.item())

        # ---- Validation phase ----
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, labels in val_loader:
                inputs = inputs.to(device)
                labels = labels.to(device).view(-1)

                outputs = model(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        # NOTE(review): assumes a non-empty validation set (total > 0).
        val_acc = 100 * correct / total
        print(f"Val Acc: {val_acc:.2f}%")

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), "best_model_2d_cnn.pth")
            print(f"New best model saved with acc {best_acc:.2f}%")

    print("Training complete")
    return model

def test_model(model, test_loader, device, num_classes):
    """Run inference over *test_loader* and collect per-sample results.

    Returns a DataFrame with one row per sample: ``p_id``, true and
    predicted 0-based labels, and one softmax-probability column per class.
    """
    model.eval()
    probs_list, preds_list, labels_list = [], [], []

    with torch.no_grad():
        for images, labels in tqdm(test_loader, desc='Testing'):
            images = images.to(device)

            # Flatten the [batch, 1] label tensor to a 1-D numpy array.
            labels = labels.view(-1).cpu().numpy()

            # Forward pass -> class probabilities and argmax predictions.
            outputs = model(images)
            probs = F.softmax(outputs, dim=1).cpu().numpy()
            preds = np.argmax(probs, axis=1)

            probs_list.extend(probs)
            preds_list.extend(preds)
            labels_list.extend(labels)

    # p_ids come from the dataset's label table — assumes the loader
    # iterates in the same (unshuffled) order; verify for the test split.
    p_ids = test_loader.dataset.label_df['p_id'].tolist()

    results_df = pd.DataFrame({
        'p_id': p_ids,
        'true_label': labels_list,
        'pred_label': preds_list
    })

    # One probability column per class.
    for cls in range(num_classes):
        results_df[f'prob_class_{cls}'] = [p[cls] for p in probs_list]

    return results_df

def save_test_report(results_df, num_classes, model_name):
    """Write an Excel report (raw data, metrics, confusion matrix) and save
    a one-vs-rest ROC curve figure for the given test results.

    Produces ``{model_name}_test_report.xlsx`` and
    ``{model_name}_auc_curves.png`` in the working directory.
    """
    excel_path = f"{model_name}_test_report.xlsx"
    writer = pd.ExcelWriter(excel_path, engine='xlsxwriter')

    # Sheet 1: raw per-sample predictions.
    results_df.to_excel(writer, sheet_name='Raw Data', index=False)

    # Sheet 2: summary metrics. Labels are shifted back to the original
    # 1-based range; the constant shift does not change any of the scores.
    y_true = results_df.true_label + 1
    y_pred = results_df.pred_label + 1
    stats_df = pd.DataFrame({
        'Metric': ['Accuracy', 'Macro F1', 'Weighted F1'],
        'Value': [
            accuracy_score(y_true, y_pred),
            f1_score(y_true, y_pred, average='macro'),
            f1_score(y_true, y_pred, average='weighted')
        ]
    })
    stats_df.to_excel(writer, sheet_name='Statistics', index=False)

    # Sheet 3: confusion matrix on the 0-based labels.
    cm = confusion_matrix(results_df.true_label, results_df.pred_label)
    cm_df = pd.DataFrame(
        cm,
        columns=[f'Pred {i}' for i in range(num_classes)],
        index=[f'True {i}' for i in range(num_classes)])
    cm_df.to_excel(writer, sheet_name='Confusion Matrix')

    writer.close()

    # Per-class one-vs-rest ROC curves.
    plt.figure(figsize=(10, 8))
    true_onehot = label_binarize(results_df.true_label, classes=range(num_classes))
    scores = results_df[[f'prob_class_{i}' for i in range(num_classes)]].values

    colors = ['#4285f4', '#db4437', '#f4b400', '#0f9d58', '#ab47bc']
    for i, color in zip(range(num_classes), colors):
        fpr, tpr, _ = roc_curve(true_onehot[:, i], scores[:, i])
        plt.plot(fpr, tpr, color=color, lw=2,
                 label=f'Class {i} (AUC = {auc(fpr, tpr):.2f})')

    # Chance diagonal and figure cosmetics.
    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Multiclass ROC Curves')
    plt.legend(loc="lower right")
    plt.savefig(f"{model_name}_auc_curves.png", bbox_inches='tight')
    plt.close()

def visualize_sample(roi, label, num_slices=3):
    """Display evenly spaced depth slices of one ROI volume.

    :param roi: Tensor of shape [1, D, H, W]
    :param label: scalar label tensor
    :param num_slices: number of slices to display
    """
    volume = roi.squeeze(0).numpy()  # -> [D, H, W]
    depth = volume.shape[0]

    # Evenly spaced slice indices across the depth axis.
    indices = np.linspace(0, depth - 1, num_slices, dtype=int)

    plt.figure(figsize=(15, 5))
    for col, z in enumerate(indices):
        plt.subplot(1, num_slices, col + 1)
        # Fixed display range, suited to z-scored intensities.
        plt.imshow(volume[z], cmap='gray', vmin=-2, vmax=2)
        plt.title(f'Slice {z}')
        plt.axis('off')
    plt.suptitle(f'Label: {label.item()} | Shape: {volume.shape}')
    plt.show()

if __name__ == "__main__":
    # Sanity-check the data pipeline: visualize the first 3 ROIs of a batch.
    loader = get_dataloader(mode="train")
    rois, labels = next(iter(loader))
    for sample_idx in range(3):
        visualize_sample(rois[sample_idx], labels[sample_idx])

    # train_model()

    # Evaluation pipeline (disabled):
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # model = CNN2D().to(device)
    # model.load_state_dict(torch.load("best_model_2d_cnn.pth"))
    # test_loader = get_dataloader("test")
    # results_df = test_model(model, test_loader, device, num_classes=config["num_classes"])
    # save_test_report(results_df, num_classes=config["num_classes"], model_name="CNN2D")
