libribrain-meg-preprocessed / example_load.py
#!/usr/bin/env python3
"""
Example script for loading preprocessed LibriBrain MEG data.

This script demonstrates how to load and use the preprocessed MEG data
with the pnpl library for training machine learning models.
"""
import torch
from pnpl.datasets import GroupedDataset
from torch.utils.data import DataLoader


def load_preprocessed_data(grouping_level=100, load_to_memory=True):
    """
    Load preprocessed LibriBrain MEG data.

    Args:
        grouping_level: Number of samples grouped together
            (5, 10, 15, 20, 25, 30, 45, 50, 55, 60, or 100).
        load_to_memory: If True, loads the entire dataset into memory
            for faster access.

    Returns:
        Tuple of (train_dataset, val_dataset, test_dataset).
    """
    base_path = f"data/grouped_{grouping_level}"

    # Load training data
    train_dataset = GroupedDataset(
        preprocessed_path=f"{base_path}/train_grouped.h5",
        load_to_memory=load_to_memory
    )

    # Load validation data
    val_dataset = GroupedDataset(
        preprocessed_path=f"{base_path}/validation_grouped.h5",
        load_to_memory=load_to_memory
    )

    # Load test data
    test_dataset = GroupedDataset(
        preprocessed_path=f"{base_path}/test_grouped.h5",
        load_to_memory=load_to_memory
    )

    return train_dataset, val_dataset, test_dataset


def main():
    # Example 1: Load data with 100-sample grouping
    print("Loading preprocessed MEG data with 100-sample grouping...")
    train_dataset, val_dataset, test_dataset = load_preprocessed_data(
        grouping_level=100,
        load_to_memory=True
    )

    print("Dataset sizes:")
    print(f"  Train: {len(train_dataset)} samples")
    print(f"  Validation: {len(val_dataset)} samples")
    print(f"  Test: {len(test_dataset)} samples")

    # Example 2: Get a single sample
    sample = train_dataset[0]
    meg_data = sample['meg']           # MEG signals: (306 channels, time_points)
    phoneme_label = sample['phoneme']  # Phoneme class index

    print("\nSample structure:")
    print(f"  MEG shape: {meg_data.shape}")
    print(f"  Phoneme label: {phoneme_label}")
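
    # Optional: per-channel z-scoring of the sample above. A minimal sketch
    # of a common MEG normalization step, not part of the pnpl API; it
    # assumes `meg_data` is a tensor/array of shape (306, time_points) and
    # that the preprocessed data is not already normalized.
    meg_tensor = torch.as_tensor(meg_data, dtype=torch.float32)
    channel_mean = meg_tensor.mean(dim=1, keepdim=True)  # (306, 1)
    channel_std = meg_tensor.std(dim=1, keepdim=True)    # (306, 1)
    meg_normalized = (meg_tensor - channel_mean) / (channel_std + 1e-8)
    print(f"  Normalized per-channel mean ~ {meg_normalized.mean():.4f}")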

    # Example 3: Use with PyTorch DataLoader
    print("\nCreating PyTorch DataLoader...")
    dataloader = DataLoader(
        train_dataset,
        batch_size=32,
        shuffle=True,
        num_workers=4,
        pin_memory=True  # Speeds up host-to-GPU transfers during training
    )

    # Example 4: Iterate through a few batches
    print("\nExample batches:")
    for batch_idx, batch in enumerate(dataloader):
        meg_batch = batch['meg']           # Shape: (batch_size, 306, time_points)
        phoneme_batch = batch['phoneme']   # Shape: (batch_size,)
        print(f"  Batch {batch_idx}:")
        print(f"    MEG batch shape: {meg_batch.shape}")
        print(f"    Phoneme batch shape: {phoneme_batch.shape}")
        if batch_idx >= 2:  # Show only the first 3 batches
            break
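
    # A minimal single-batch training step (a sketch, not the project's
    # training recipe). Assumptions flagged here: NUM_PHONEME_CLASSES is a
    # placeholder to set from the dataset's actual label space, and the
    # linear probe below is a toy model chosen only to show tensor shapes.
    NUM_PHONEME_CLASSES = 39  # placeholder -- check the dataset's labels
    n_channels, n_times = meg_batch.shape[1], meg_batch.shape[2]
    model = torch.nn.Sequential(
        torch.nn.Flatten(),  # (batch_size, 306 * time_points)
        torch.nn.Linear(n_channels * n_times, NUM_PHONEME_CLASSES),
    )
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    logits = model(meg_batch.float())  # (batch_size, NUM_PHONEME_CLASSES)
    loss = criterion(logits, phoneme_batch.long())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(f"\nToy training-step loss: {loss.item():.4f}")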

    # Example 5: Different grouping levels for different speed/accuracy trade-offs
    print("\n" + "=" * 50)
    print("Available grouping levels:")
    print("  - grouped_5: Highest fidelity, largest files")
    print("  - grouped_10: High fidelity")
    print("  - grouped_20: Good balance")
    print("  - grouped_50: Faster loading, moderate averaging")
    print("  - grouped_100: Fastest loading, most averaging")
    print("\nChoose based on your requirements:")
    print("  - For maximum accuracy: use lower grouping (5-20)")
    print("  - For faster experimentation: use higher grouping (50-100)")
    print("  - For production models: start with high grouping for prototyping,")
    print("    then switch to lower grouping for final training")
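
    # Lazy loading sketch: load_to_memory=False leaves the data in the HDF5
    # files and reads samples on demand, which helps with the larger
    # low-grouping files. Assumes data/grouped_20 has been downloaded.
    lazy_train, _, _ = load_preprocessed_data(
        grouping_level=20,
        load_to_memory=False,
    )
    print(f"\nLazily loaded grouped_20 train set: {len(lazy_train)} samples")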


if __name__ == "__main__":
    main()