"""
Generate a Schrödinger equation dataset and save it to parquet files in chunks.

Creates samples of 1D time-dependent Schrödinger equation solutions
with a harmonic oscillator potential and random Gaussian wave packet
initial conditions.
"""
| |
|
| | import os |
| | import numpy as np |
| | import pyarrow as pa |
| | import pyarrow.parquet as pq |
| | from dataset import SchrodingerDataset |
| |
|
| |
|
def generate_dataset_split(
    split_name="train",
    num_samples=1000,
    chunk_size=100,
    output_dir="data",
    dataset_factory=None,
):
    """
    Generate a dataset split and save it as chunked parquet files.

    Draws ``num_samples`` samples from the dataset iterator and writes them to
    ``output_dir`` as parquet files of at most ``chunk_size`` rows each, named
    ``{split_name}-{chunk:05d}-of-{total:05d}.parquet``.

    Parameters
    ----------
    split_name : str
        Label used in the output filenames (e.g. "train", "test").
    num_samples : int
        Total number of samples to generate; must be >= 0.
    chunk_size : int
        Maximum number of rows per parquet file; must be > 0.
    output_dir : str
        Directory receiving the parquet files (created if missing).
    dataset_factory : callable or None
        Optional zero-argument callable returning an iterable of sample
        dicts. Defaults to a ``SchrodingerDataset`` with the standard
        simulation parameters, preserving the original behavior.

    Returns
    -------
    int
        The number of samples generated (== ``num_samples``).

    Raises
    ------
    ValueError
        If ``chunk_size`` is not positive or ``num_samples`` is negative.
    """
    if chunk_size <= 0:
        raise ValueError(f"chunk_size must be positive, got {chunk_size}")
    if num_samples < 0:
        raise ValueError(f"num_samples must be non-negative, got {num_samples}")

    os.makedirs(output_dir, exist_ok=True)

    # Default dataset: harmonic-oscillator Schrödinger simulation with the
    # standard parameters. Override via dataset_factory for custom setups.
    if dataset_factory is None:
        dataset = SchrodingerDataset(
            Lx=20.0,
            Nx=256,
            hbar=1.0,
            mass=1.0,
            omega=1.0,
            stop_sim_time=2.0,
            timestep=1e-3,
        )
    else:
        dataset = dataset_factory()

    # Ceiling division: the last chunk may hold fewer than chunk_size rows.
    num_chunks = (num_samples + chunk_size - 1) // chunk_size

    print(f"Generating {num_samples} {split_name} samples in {num_chunks} chunks...")

    dataset_iter = iter(dataset)
    chunk_data = None  # lazily keyed off the first sample's fields

    for i in range(num_samples):
        sample = next(dataset_iter)

        if chunk_data is None:
            chunk_data = {key: [] for key in sample}

        for key, value in sample.items():
            chunk_data[key].append(value)

        # Flush a full chunk, or whatever remains on the final sample.
        if (i + 1) % chunk_size == 0 or i == num_samples - 1:
            chunk_idx = i // chunk_size

            # Convert array-like values (anything exposing .tolist(), e.g.
            # numpy arrays) to plain lists so pyarrow can infer nested
            # list column types.
            table_data = {
                key: [v.tolist() if hasattr(v, "tolist") else v for v in values]
                for key, values in chunk_data.items()
            }

            table = pa.table(table_data)

            filename = f"{split_name}-{chunk_idx:05d}-of-{num_chunks:05d}.parquet"
            filepath = os.path.join(output_dir, filename)
            pq.write_table(table, filepath)

            print(f"Saved chunk {chunk_idx + 1}/{num_chunks}: {filepath}")

            # Start accumulating the next chunk.
            chunk_data = {key: [] for key in sample}

    print(f"Generated {num_samples} {split_name} samples")
    return num_samples
| |
|
| |
|
def _main():
    """Script entry point: generate the train and test splits reproducibly."""
    # Fixed seed so regenerating the dataset yields identical samples.
    np.random.seed(42)

    # Training split: 1000 samples across 10 chunk files.
    generate_dataset_split("train", num_samples=1000, chunk_size=100)

    # Test split: 200 samples across 2 chunk files.
    generate_dataset_split("test", num_samples=200, chunk_size=100)


if __name__ == "__main__":
    _main()
| |
|