|
|
|
|
|
""" |
|
|
Example script for loading preprocessed LibriBrain MEG data. |
|
|
|
|
|
This script demonstrates how to load and use the preprocessed MEG data |
|
|
with the pnpl library for training machine learning models. |
|
|
""" |
|
|
|
|
|
import numpy as np |
|
|
from pnpl.datasets import GroupedDataset |
|
|
from torch.utils.data import DataLoader |
|
|
import torch |
|
|
|
|
|
def load_preprocessed_data(grouping_level=100, load_to_memory=True):
    """
    Load preprocessed LibriBrain MEG data.

    Args:
        grouping_level: Number of samples grouped together (5, 10, 15, 20, 25,
            30, 45, 50, 55, 60, or 100). Selects the data/grouped_<level>
            directory; no validation is performed here, so a missing level
            surfaces as a file-not-found error from GroupedDataset.
        load_to_memory: If True, loads entire dataset to memory for faster access.

    Returns:
        Tuple of (train_dataset, val_dataset, test_dataset).
    """
    base_path = f"data/grouped_{grouping_level}"

    def _load_split(split_name):
        # The three splits differ only in file name (e.g. train_grouped.h5),
        # so build each dataset through this single helper instead of
        # repeating the constructor call three times.
        return GroupedDataset(
            preprocessed_path=f"{base_path}/{split_name}_grouped.h5",
            load_to_memory=load_to_memory,
        )

    return _load_split("train"), _load_split("validation"), _load_split("test")
|
|
|
|
|
|
|
|
def main():
    """Demo: load the grouped MEG datasets, inspect a sample, and batch them.

    Side effects only (prints to stdout); requires the preprocessed HDF5
    files under data/grouped_100 to exist.
    """
    print("Loading preprocessed MEG data with 100-sample grouping...")
    train_dataset, val_dataset, test_dataset = load_preprocessed_data(
        grouping_level=100,
        load_to_memory=True
    )

    # Plain strings here — no placeholders, so no f-prefix needed.
    print("Dataset sizes:")
    print(f"  Train: {len(train_dataset)} samples")
    print(f"  Validation: {len(val_dataset)} samples")
    print(f"  Test: {len(test_dataset)} samples")

    # Peek at one sample to show the dict layout returned by the dataset.
    # Assumes each sample is a mapping with 'meg' and 'phoneme' keys —
    # grounded in the keys read below; exact tensor shape depends on pnpl.
    sample = train_dataset[0]
    meg_data = sample['meg']
    phoneme_label = sample['phoneme']

    print("\nSample structure:")
    print(f"  MEG shape: {meg_data.shape}")
    print(f"  Phoneme label: {phoneme_label}")

    print("\nCreating PyTorch DataLoader...")
    dataloader = DataLoader(
        train_dataset,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True  # speeds up host-to-GPU transfer when training on CUDA
    )

    # Show the first few batches, then stop — this is a demo, not training.
    print("\nExample batch:")
    for batch_idx, batch in enumerate(dataloader):
        meg_batch = batch['meg']
        phoneme_batch = batch['phoneme']

        print(f"  Batch {batch_idx}:")
        print(f"    MEG batch shape: {meg_batch.shape}")
        print(f"    Phoneme batch shape: {phoneme_batch.shape}")

        if batch_idx >= 2:
            break

    # Guidance for choosing a grouping level.
    print("\n" + "="*50)
    print("Available grouping levels:")
    print("  - grouped_5: Highest fidelity, largest files")
    print("  - grouped_10: High fidelity")
    print("  - grouped_20: Good balance")
    print("  - grouped_50: Faster loading, moderate averaging")
    print("  - grouped_100: Fastest loading, most averaging")
    print("\nChoose based on your requirements:")
    print("  - For maximum accuracy: use lower grouping (5-20)")
    print("  - For faster experimentation: use higher grouping (50-100)")
    print("  - For production models: start with high grouping for prototyping,")
    print("    then switch to lower grouping for final training")
|
|
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()