|
import nibabel as nib |
|
import os |
|
import numpy as np |
|
|
|
import datasets |
|
|
|
import boto3 |
|
|
|
|
|
_DESCRIPTION = """\ |
|
fMIR dataset from openneuro.org |
|
""" |
|
|
|
class fMRIConfig(datasets.BuilderConfig):
    """Builder config for the fMRI dataset.

    Holds the S3 bucket to read from and the temporal slicing parameters
    consumed by the builder's split/example generation.
    """

    def __init__(self, data_url, num_datasets=None, num_frames=8, sampling_rate=1, **kwargs):
        """BuilderConfig for fMRI.

        Args:
            data_url: `string`, S3 bucket name to list and download from.
            num_datasets: per-split subject-folder counts as a
                ``[train, validation, test]`` sequence, or a single int
                (interpreted downstream as the train count). Defaults to
                ``[10, 1, 1]``.
            num_frames: number of frames in each yielded clip.
            sampling_rate: temporal stride (in frames) between sampled frames.
            **kwargs: keyword arguments forwarded to super.
        """
        super(fMRIConfig, self).__init__(**kwargs)
        self.data_url = data_url
        # Bug fix: the original used a mutable default argument
        # (num_datasets=[10, 1, 1]), which is shared across all calls.
        # A None sentinel preserves the same default value safely.
        self.num_datasets = [10, 1, 1] if num_datasets is None else num_datasets
        self.num_frames = num_frames
        self.sampling_rate = sampling_rate
|
|
|
class fMRITest(datasets.GeneratorBasedBuilder):
    """Streams fMRI data from an OpenNeuro-style S3 bucket.

    Each top-level bucket "folder" is treated as one dataset/subject group.
    For a subject whose anatomical (T1w) scan has been seen, every matching
    BOLD run is downloaded and yielded as fixed-length float16 clips.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        fMRIConfig(
            name="test1",
            data_url="openneuro.org",
            version=VERSION,
            description="fMRI test dataset 1",
        ),
    ]

    DEFAULT_CONFIG_NAME = "test1"

    def _info(self):
        """Return dataset metadata; features are left unspecified (inferred)."""
        # Bug fix: initialize unconditionally so `features` can never be
        # unbound if a config other than "test1" is ever added.
        features = None
        if self.config.name == "test1":
            features = None
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Partition the bucket's top-level folders into train/val/test.

        The split sizes come from ``config.num_datasets``; folders are taken
        in listing order, train first, then validation, then test.
        """
        s3 = boto3.client('s3')
        bucket_name = self.config.data_url

        # Top-level "folders" (CommonPrefixes under the bucket root) are the
        # per-subject datasets.
        # NOTE(review): list_objects_v2 returns at most 1000 entries per call;
        # no pagination is done here — confirm the bucket is small enough.
        response = s3.list_objects_v2(Bucket=bucket_name, Prefix='', Delimiter='/')
        folder_names = [x['Prefix'].split('/')[-2] for x in response.get('CommonPrefixes', [])]

        ndatasets = self.config.num_datasets
        if isinstance(ndatasets, int):
            # A bare int sets only the train count; validation/test fall back
            # to 10 each (original behavior).
            ndatasets = [ndatasets, 10, 10]

        n_train, n_val, n_test = ndatasets[0], ndatasets[1], ndatasets[2]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "bucket_name": bucket_name,
                    "folder_names": folder_names[:n_train],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "bucket_name": bucket_name,
                    "folder_names": folder_names[n_train:n_train + n_val],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "bucket_name": bucket_name,
                    "folder_names": folder_names[n_train + n_val:n_train + n_val + n_test],
                },
            ),
        ]

    def _generate_examples(self, bucket_name, folder_names):
        """Yield ``(key, example)`` pairs of BOLD clips.

        Args:
            bucket_name: S3 bucket to download from.
            folder_names: top-level folder (subject) prefixes to scan.

        Yields:
            Tuples of a unique string key (``"<s3 key>-<frame offset>"``) and
            a dict with a single ``"func"`` entry: a float16 array of shape
            ``(num_frames, x, y, z)``.
        """
        s3 = boto3.client('s3')
        # Both downloads reuse the same two local paths, so each new file
        # overwrites the previous one.
        tmp_dir = os.path.join('tmp', folder_names[0]) if len(folder_names) > 0 else 'tmp'
        # Bug fix: exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...)` guard.
        os.makedirs(tmp_dir, exist_ok=True)
        anat_file = os.path.join(tmp_dir, 'T1w.nii.gz')
        func_file = os.path.join(tmp_dir, 'bold.nii.gz')

        # Number of raw frames spanned by one yielded clip.
        duration = self.config.num_frames * self.config.sampling_rate

        # Bug fix: initialize so a BOLD key listed before any T1w key cannot
        # raise UnboundLocalError (the original assumed T1w always came first
        # in the S3 listing).
        anat_subj = None
        for folder_name in folder_names:
            response = s3.list_objects_v2(Bucket=bucket_name, Prefix=folder_name)
            for obj in response.get('Contents', []):
                obj_key = obj['Key']
                if '_T1w.nii.gz' in obj_key:
                    # Remember which subject the anatomical scan belongs to.
                    anat_subj = obj_key.split('/')[1]
                    s3.download_file(bucket_name, obj_key, anat_file)
                    # NOTE(review): the header is read but never used —
                    # kept for parity with the original; consider removing.
                    anat_header = nib.load(anat_file).header
                elif '_bold.nii.gz' in obj_key:
                    func_subj = obj_key.split('/')[1]
                    # Only yield BOLD runs whose subject matches the most
                    # recently seen anatomical scan.
                    if anat_subj is not None and func_subj == anat_subj:
                        s3.download_file(bucket_name, obj_key, func_file)
                        func = nib.load(func_file).get_fdata().astype('float16')
                        # Move the time axis first: (t, x, y, z).
                        func = np.transpose(func, (3, 0, 1, 2))
                        shape = func.shape

                        # Non-overlapping windows of `duration` frames,
                        # subsampled every `sampling_rate` frames.
                        for i in range(0, shape[0] - duration + self.config.sampling_rate, duration):
                            func_slice = func[i:i + duration:self.config.sampling_rate, :, :, :]
                            yield f"{obj_key}-{i}", {
                                "func": func_slice,
                            }
|
|
|
|
|
|
|
|