import os
import nibabel as nib
import numpy as np
import torch
from torch.utils.data import Dataset

class PairedNiftiDataset(Dataset):
    """Dataset of paired whole-brain / hippocampus NIfTI volumes.

    Supports two construction modes:
      1) ``brain_dir`` + ``hippocampus_dir``: scan class-subfolder directories
         and pair files automatically by naming convention.
      2) ``brain_files`` + ``hipp_files`` + ``labels``: build samples directly
         from pre-computed path/label lists (used for k-fold splitting).
    """

    def __init__(self, brain_dir=None, hippocampus_dir=None,
                 brain_files=None, hipp_files=None, labels=None,
                 transform=None):
        """
        Args:
            brain_dir (str): root of brain volumes, one subdirectory per class.
            hippocampus_dir (str): root of hippocampus volumes, same layout.
            brain_files (list[str]): full paths to brain volumes.
            hipp_files (list[str]): full paths to hippocampus volumes,
                index-aligned with ``brain_files``.
            labels (list[int]): numeric label per pair.
            transform (callable, optional): applied to each tensor in
                ``__getitem__``.

        Raises:
            ValueError: if neither construction mode is fully specified,
                or the three lists have mismatched lengths.
        """
        self.transform = transform
        self.samples = []       # list of (brain_path, hipp_path, label) tuples
        self.class_to_idx = {}  # class-name -> label index (dir-scan mode only)

        if brain_files is not None and hipp_files is not None and labels is not None:
            # List mode: use the provided paths/labels verbatim.
            # Explicit raise instead of `assert` so the check survives `python -O`.
            if not (len(brain_files) == len(hipp_files) == len(labels)):
                raise ValueError("文件和标签数量不匹配")
            self.samples = list(zip(brain_files, hipp_files, labels))
        elif brain_dir is not None and hippocampus_dir is not None:
            # Directory mode: scan class subfolders and pair files by name.
            # (A former third branch duplicating the first condition was
            # unreachable and has been removed.)
            self.data_B_dir = brain_dir
            self.data_h_dir = hippocampus_dir
            self._build_dataset()
        else:
            raise ValueError("必须传入 brain_dir 和 hippocampus_dir，或者 brain_files, hipp_files, labels")

    def _build_dataset(self):
        """Scan class subdirectories and populate ``self.samples``.

        Each entry under ``data_B_dir`` is treated as a class name; its label
        index is its position in the sorted listing. A brain file is kept only
        when the matching hippocampus file (see ``_to_hipp_name``) exists in
        the corresponding hippocampus class folder; otherwise a warning is
        printed and the file is skipped.
        """
        # Only directory mode calls this method, so `classes` is always
        # derived from the brain root (the old `hasattr` branch was dead code
        # and left `classes` undefined).
        classes = sorted(os.listdir(self.data_B_dir))
        self.class_to_idx = {cls: idx for idx, cls in enumerate(classes)}

        for cls in classes:
            b_cls_path = os.path.join(self.data_B_dir, cls)
            h_cls_path = os.path.join(self.data_h_dir, cls)
            # Skip stray files and classes missing on either side.
            if not os.path.isdir(b_cls_path) or not os.path.isdir(h_cls_path):
                continue

            b_files = [f for f in os.listdir(b_cls_path)
                       if f.endswith(('.nii', '.nii.gz'))]

            for b_file in b_files:
                b_path = os.path.join(b_cls_path, b_file)
                h_file = self._to_hipp_name(b_file)
                h_path = os.path.join(h_cls_path, h_file)

                if os.path.exists(h_path):
                    label_idx = self.class_to_idx[cls]
                    self.samples.append((b_path, h_path, label_idx))
                else:
                    print(f"[警告] 缺失对应的 hippocampus 文件: {h_path}")

    def _to_hipp_name(self, b_file):
        """Map a brain filename to its hippocampus counterpart.

        ``X.nii``    -> ``X_L_Hipp.nii``
        ``X.nii.gz`` -> ``X_L_Hipp.nii.gz``

        Raises:
            ValueError: for any other extension.
        """
        if b_file.endswith('.nii.gz'):
            base = b_file[:-7]
            return base + '_L_Hipp.nii.gz'
        elif b_file.endswith('.nii'):
            base = b_file[:-4]
            return base + '_L_Hipp.nii'
        else:
            raise ValueError(f"不支持的文件扩展名: {b_file}")

    def __len__(self):
        """Number of paired samples."""
        return len(self.samples)

    def __getitem__(self, idx):
        """Load, normalize, and return one (brain, hippocampus, label) triple.

        Returns:
            tuple: ``(b_tensor, h_tensor, label)`` where each tensor is
            float32 with a leading channel dimension of 1.

        Raises:
            ValueError: if the paired volumes differ in shape.
        """
        b_path, h_path, label = self.samples[idx]

        b_img = nib.load(b_path).get_fdata()
        h_img = nib.load(h_path).get_fdata()

        if b_img.shape != h_img.shape:
            raise ValueError(f"[尺寸错误] {b_path} 与 {h_path} 尺寸不一致")

        b_img = self._normalize(b_img).astype(np.float32)
        h_img = self._normalize(h_img).astype(np.float32)

        # unsqueeze(0) adds the channel axis expected by conv layers.
        b_tensor = torch.from_numpy(b_img).unsqueeze(0)
        h_tensor = torch.from_numpy(h_img).unsqueeze(0)

        if self.transform:
            b_tensor = self.transform(b_tensor)
            h_tensor = self.transform(h_tensor)

        return b_tensor, h_tensor, label

    def _normalize(self, arr):
        """Min-max normalize *arr* to [0, 1], NaN-safe.

        Constant arrays are returned unchanged (after NaN replacement) to
        avoid a 0/0 division; otherwise the 1e-8 epsilon guards the
        denominator.
        """
        arr = np.nan_to_num(arr)
        if np.std(arr) == 0:
            return arr
        return (arr - np.min(arr)) / (np.max(arr) - np.min(arr) + 1e-8)
