from dataset import ADNIDataset
from torch.utils.data import WeightedRandomSampler
from sklearn.model_selection import train_test_split
import glob
import os


def get_dataset(cfg):
    """Build train/val datasets for the dataset named in the config.

    Args:
        cfg: Config object exposing ``cfg.dataset.name`` and
            ``cfg.dataset.root_dir`` (directory scanned recursively
            for ``*.nii.gz`` volumes).

    Returns:
        Tuple ``(train_dataset, val_dataset, sampler)`` where the
        datasets are ``ADNIDataset`` instances and ``sampler`` is
        currently always ``None`` (no class-balancing sampler is built).

    Raises:
        ValueError: If the dataset name is not supported, or if no
            ``*.nii.gz`` files are found under ``root_dir``.
    """
    if cfg.dataset.name == 'ADNI':
        # 1. Collect every NIfTI volume under the dataset root.
        # '**' with recursive=True matches files at any depth,
        # including root_dir itself.
        all_files = glob.glob(os.path.join(
            cfg.dataset.root_dir, '**', '*.nii.gz'), recursive=True)

        # Fail fast with an actionable message instead of letting
        # train_test_split raise an opaque error on an empty list.
        if not all_files:
            raise ValueError(
                f'No *.nii.gz files found under {cfg.dataset.root_dir!r}')

        # 2. Split the file list into train/val partitions.
        # test_size=0.2 reserves 20% of the data for validation.
        # A fixed random_state makes the split reproducible across runs.
        train_files, val_files = train_test_split(
            all_files, test_size=0.2, random_state=42)

        print(f"Total files: {len(all_files)}")
        print(f"Training files: {len(train_files)}")
        print(f"Validation files: {len(val_files)}")

        # 3. Create one Dataset instance per split.
        # The validation set must not be augmented, so evaluation
        # measures performance on unmodified volumes.
        train_dataset = ADNIDataset(
            file_list=train_files, augmentation=True)
        val_dataset = ADNIDataset(
            file_list=val_files, augmentation=False)  # no augmentation for val

        sampler = None
        return train_dataset, val_dataset, sampler
    raise ValueError(f'{cfg.dataset.name} Dataset is not available')
