DabbyOWL committed
Commit d9c756d · verified · 0 Parent(s)

Reset history

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitignore +206 -0
  2. DATA_GUIDE.md +817 -0
  3. MODEL_GUIDE.md +352 -0
  4. README.md +515 -0
  5. configs/1dkdv.yaml +5 -0
  6. configs/1dkdv_ttt.yaml +8 -0
  7. configs/2ddf.yaml +8 -0
  8. configs/2ddf_ttt.yaml +9 -0
  9. configs/2dns.yaml +5 -0
  10. configs/2dns_ttt.yaml +10 -0
  11. configs/2drddu.yaml +7 -0
  12. configs/2drddu_ttt.yaml +12 -0
  13. configs/2drdk.yaml +6 -0
  14. configs/2drdk_ttt.yaml +10 -0
  15. configs/2dtf.yaml +6 -0
  16. configs/2dtf_ttt.yaml +11 -0
  17. configs/base.yaml +19 -0
  18. configs/callbacks/2ddf.yaml +9 -0
  19. configs/callbacks/base.yaml +10 -0
  20. configs/data/base.yaml +112 -0
  21. configs/lightning_module/base.yaml +10 -0
  22. configs/lightning_module/ttt.yaml +8 -0
  23. configs/logging/base.yaml +4 -0
  24. configs/loss/mse.yaml +5 -0
  25. configs/loss/relative.yaml +5 -0
  26. configs/lr_scheduler/cosine.yaml +2 -0
  27. configs/model/fno.yaml +36 -0
  28. configs/model/fno_50k.yaml +9 -0
  29. configs/model/fno_50mil.yaml +9 -0
  30. configs/model/resnet.yaml +35 -0
  31. configs/model/scot.yaml +41 -0
  32. configs/optimizer/adam.yaml +2 -0
  33. configs/system_params/1dkdv.yaml +17 -0
  34. configs/system_params/2ddf.yaml +27 -0
  35. configs/system_params/2dns.yaml +16 -0
  36. configs/system_params/2drddu.yaml +9 -0
  37. configs/system_params/2drdk.yaml +18 -0
  38. configs/system_params/2dtf.yaml +16 -0
  39. configs/system_params/base.yaml +91 -0
  40. configs/tailoring_optimizer/adam.yaml +2 -0
  41. configs/tailoring_optimizer/sgd.yaml +2 -0
  42. configs/trainer/trainer.yaml +4 -0
  43. configs/ttt_base.yaml +14 -0
  44. environment.yml +144 -0
  45. fluid_stats.py +418 -0
  46. huggingface_pdeinv_download.py +60 -0
  47. pdeinvbench/__init__.py +5 -0
  48. pdeinvbench/data/__init__.py +1 -0
  49. pdeinvbench/data/dataset.py +360 -0
  50. pdeinvbench/data/transforms.py +80 -0
.gitignore ADDED
@@ -0,0 +1,206 @@
1
+ # Ignore W&B
2
+ wandb/**
3
+
4
+ # macOS files
5
+ .DS_Store
6
+
7
+ # Ignore .specstory directory
8
+ .specstory/
9
+
10
+ # Local data store
11
+ **.npz
12
+ **.json
13
+
14
+ # Ignore model files
15
+ **.pt
16
+ **.pth
17
+
18
+ # Ignore local scripts (and local images)
19
+ local_scripts/**
20
+ tests/test-images
21
+ # **.png
22
+ **.jpeg
23
+ **.pdf
24
+
25
+
26
+ # Ignore runner scripts
27
+ runner*
28
+ slurm*
29
+ # Ignore local directories
30
+ notebooks/**
31
+ local-scripts/**
32
+ .vscode/**
33
+
34
+ # Logging folders
35
+ test-images/**
36
+ logs/**
37
+ wandb/**
38
+ outputs/**
39
+
40
+ # wandb artifacts containing model checkpoints
41
+ artifacts/**
42
+
43
+ # Byte-compiled / optimized / DLL files
44
+ __pycache__/
45
+ *.py[cod]
46
+ *$py.class
47
+
48
+ # C extensions
49
+ *.so
50
+
51
+ # Distribution / packaging
52
+ .Python
53
+ build/
54
+ develop-eggs/
55
+ dist/
56
+ downloads/
57
+ eggs/
58
+ .eggs/
59
+ lib/
60
+ lib64/
61
+ parts/
62
+ sdist/
63
+ var/
64
+ wheels/
65
+ share/python-wheels/
66
+ *.egg-info/
67
+ .installed.cfg
68
+ *.egg
69
+ MANIFEST
70
+
71
+ # PyInstaller
72
+ # Usually these files are written by a python script from a template
73
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
74
+ *.manifest
75
+ *.spec
76
+
77
+ # Installer logs
78
+ pip-log.txt
79
+ pip-delete-this-directory.txt
80
+
81
+ # Unit test / coverage reports
82
+ htmlcov/
83
+ .tox/
84
+ .nox/
85
+ .coverage
86
+ .coverage.*
87
+ .cache
88
+ nosetests.xml
89
+ coverage.xml
90
+ *.cover
91
+ *.py,cover
92
+ .hypothesis/
93
+ .pytest_cache/
94
+ cover/
95
+
96
+ # Translations
97
+ *.mo
98
+ *.pot
99
+
100
+ # Django stuff:
101
+ *.log
102
+ local_settings.py
103
+ db.sqlite3
104
+ db.sqlite3-journal
105
+
106
+ # Flask stuff:
107
+ instance/
108
+ .webassets-cache
109
+
110
+ # Scrapy stuff:
111
+ .scrapy
112
+
113
+ # Sphinx documentation
114
+ docs/_build/
115
+
116
+ # PyBuilder
117
+ .pybuilder/
118
+ target/
119
+
120
+ # Jupyter Notebook
121
+ .ipynb_checkpoints
122
+ temp.ipynb
123
+
124
+ # Model checkpoints
125
+ *ckpt
126
+
127
+ # IPython
128
+ profile_default/
129
+ ipython_config.py
130
+
131
+ # pyenv
132
+ # For a library or package, you might want to ignore these files since the code is
133
+ # intended to run in multiple environments; otherwise, check them in:
134
+ # .python-version
135
+
136
+ # pipenv
137
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
138
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
139
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
140
+ # install all needed dependencies.
141
+ #Pipfile.lock
142
+
143
+ # poetry
144
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
145
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
146
+ # commonly ignored for libraries.
147
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
148
+ #poetry.lock
149
+
150
+ # pdm
151
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
152
+ #pdm.lock
153
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
154
+ # in version control.
155
+ # https://pdm.fming.dev/#use-with-ide
156
+ .pdm.toml
157
+
158
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
159
+ __pypackages__/
160
+
161
+ # Celery stuff
162
+ celerybeat-schedule
163
+ celerybeat.pid
164
+
165
+ # SageMath parsed files
166
+ *.sage.py
167
+
168
+ # Environments
169
+ .env
170
+ .venv
171
+ env/
172
+ venv/
173
+ ENV/
174
+ env.bak/
175
+ venv.bak/
176
+
177
+ # Spyder project settings
178
+ .spyderproject
179
+ .spyproject
180
+
181
+ # Rope project settings
182
+ .ropeproject
183
+
184
+ # mkdocs documentation
185
+ /site
186
+
187
+ # mypy
188
+ .mypy_cache/
189
+ .dmypy.json
190
+ dmypy.json
191
+
192
+ # Pyre type checker
193
+ .pyre/
194
+
195
+ # pytype static type analyzer
196
+ .pytype/
197
+
198
+ # Cython debug symbols
199
+ cython_debug/
200
+
201
+ # PyCharm
202
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
203
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
204
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
205
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
206
+ #.idea/
DATA_GUIDE.md ADDED
@@ -0,0 +1,817 @@
1
+ # PDEInvBench Data Guide
2
+
3
+ ## Table of Contents
4
+
5
+ 1. [Dataset Link](#1-dataset-link)
6
+ 2. [Downloading Data](#2-downloading-data)
7
+ 3. [Overview](#3-overview)
8
+ - [3.1 Data Format](#31-data-format)
9
+ - [3.2 Parameter Extraction from Filenames](#32-parameter-extraction-from-filenames)
10
+ - [3.3 Working with High-Resolution Data](#33-working-with-high-resolution-data)
11
+ - [3.4 Data Loading Parameters](#34-data-loading-parameters)
12
+ - [3.5 Parameter Normalization](#35-parameter-normalization)
13
+ 4. [Datasets](#4-datasets)
14
+ - [4a. 2D Reaction Diffusion](#4a-2d-reaction-diffusion)
15
+ - [4b. 2D Navier Stokes (Unforced)](#4b-2d-navier-stokes-unforced)
16
+ - [4c. 2D Turbulent Flow (Forced Navier Stokes)](#4c-2d-turbulent-flow-forced-navier-stokes)
17
+ - [4d. 1D Korteweg-De Vries](#4d-1d-korteweg-de-vries)
18
+ - [4e. 2D Darcy Flow](#4e-2d-darcy-flow)
19
+ 5. [Adding a New Dataset](#5-adding-a-new-dataset)
20
+
21
+
22
+ ## 1. Dataset Link
23
+
24
+ The dataset used in this project can be found here:
25
+ https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main
26
+
27
+ ## 2. Downloading Data
28
+
29
+ We provide a Python script, [`huggingface_pdeinv_download.py`](huggingface_pdeinv_download.py), to batch-download our Hugging Face data. The README of our Hugging Face dataset and our GitHub repo will be updated to reflect this addition. To run it:
30
+
31
+ ```bash
32
+ pip install huggingface_hub
33
+ python3 huggingface_pdeinv_download.py [--dataset DATASET_NAME] [--split SPLIT] [--local-dir PATH]
34
+ ```
35
+
36
+ **Available datasets:** `darcy-flow-241`, `darcy-flow-421`, `korteweg-de-vries-1d`, `navier-stokes-forced-2d-2048`, `navier-stokes-forced-2d`, `navier-stokes-unforced-2d`, `reaction-diffusion-2d-du-512`, `reaction-diffusion-2d-du`, `reaction-diffusion-2d-k-512`, `reaction-diffusion-2d-k`
37
+
38
+ **Available splits:** `*` (all), `train`, `validation`, `test`, `out_of_distribution`, `out_of_distribution_extreme`
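+
+ If you prefer to script the download in Python, `huggingface_hub.snapshot_download` can fetch a subset of the repository directly. This is a minimal sketch rather than part of the provided tooling; the folder pattern passed to `allow_patterns` is an assumption about the Hub layout and may need adjusting:
+
+ ```python
+ # Minimal sketch: download one dataset/split with huggingface_hub directly.
+ # The repo_id comes from the dataset link in Section 1; the folder pattern below
+ # is an assumption about the Hub layout and may need adjusting.
+ from huggingface_hub import snapshot_download
+
+ local_path = snapshot_download(
+     repo_id="DabbyOWL/PDE_Inverse_Problem_Benchmarking",
+     repo_type="dataset",
+     allow_patterns=["korteweg-de-vries-1d/train/*"],  # restrict to one dataset/split
+     local_dir="./data/pdeinvbench",
+ )
+ print("Downloaded to:", local_path)
+ ```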
39
+
40
+
41
+ ## 3. Overview
42
+
43
+ The PDEInvBench dataset contains five PDE systems spanning parabolic, hyperbolic, and elliptic classifications, designed for benchmarking inverse parameter estimation.
44
+
45
+ ### Dataset Scale and Scope
46
+
47
+ The dataset encompasses **over 1.2 million individual simulations** across five PDE systems, with varying spatial and temporal resolutions:
48
+
49
+ - **2D Reaction Diffusion**: 28×28×27 = 21,168 parameter combinations × 5 trajectories = 105,840 simulations
50
+ - **2D Navier Stokes**: 101 parameter values × 192 trajectories = 19,392 simulations
51
+ - **2D Turbulent Flow**: 120 parameter values × 108 trajectories = 12,960 simulations
52
+ - **1D Korteweg-De Vries**: 100 parameter values × 100 trajectories = 10,000 simulations
53
+ - **2D Darcy Flow**: 2,048 unique coefficient fields
54
+
55
+ ### Multi-Resolution Architecture
56
+
57
+ The dataset provides multiple spatial resolutions for each system, enabling studies on resolution-dependent generalization:
58
+
59
+ - **Low Resolution**: 64×64 (2D systems), 256 (1D KdV), 241×241 (Darcy Flow)
60
+ - **Medium Resolution**: 128×128 (2D systems), 256×256 (Turbulent Flow)
61
+ - **High Resolution**: 256×256, 512×512, 1024×1024 (2D systems), 421×421 (Darcy Flow)
62
+
63
+ ### Physical and Mathematical Diversity
64
+
65
+ **Parabolic Systems** (Time-dependent, diffusive):
66
+ - **2D Reaction Diffusion**: Chemical pattern formation with Fitzhugh-Nagumo dynamics
67
+ - **2D Navier Stokes**: Fluid flow without external forcing
68
+ - **2D Turbulent Flow**: Forced fluid dynamics with Kolmogorov forcing
69
+
70
+ **Hyperbolic Systems** (Wave propagation):
71
+ - **1D Korteweg-De Vries**: Soliton dynamics in shallow water waves
72
+
73
+ **Elliptic Systems** (Steady-state):
74
+ - **2D Darcy Flow**: Groundwater flow through porous media
75
+
76
+ ### Parameter Space Coverage
77
+
78
+ The dataset systematically explores parameter spaces across different physical regimes:
79
+
80
+ - **Reaction Diffusion**: k ∈ [0.005,0.1], Du ∈ [0.01,0.5], Dv ∈ [0.01,0.5] (Turing bifurcations)
81
+ - **Navier Stokes**: ν ∈ [10⁻⁴,10⁻²] (Reynolds: 80-8000, laminar to transitional)
82
+ - **Turbulent Flow**: ν ∈ [10⁻⁵,10⁻²] (fully developed turbulence)
83
+ - **Korteweg-De Vries**: δ ∈ [0.8,5] (dispersion strength in shallow water)
84
+ - **Darcy Flow**: Piecewise constant diffusion coefficients (porous media heterogeneity)
85
+
86
+ ### Evaluation Framework
87
+
88
+ The dataset provides a three-tier evaluation system for systematic generalization testing:
89
+
90
+ 1. **In-Distribution (ID)**: Parameters within training ranges for baseline performance
91
+ 2. **Out-of-Distribution (Non-Extreme)**: Middle-range parameters excluded from training
92
+ 3. **Out-of-Distribution (Extreme)**: Extremal parameter values for stress testing
93
+
94
+ This framework enables systematic evaluation of model robustness across parameter space, critical for real-world deployment where models must generalize beyond training distributions.
95
+
96
+
97
+ ### Data Organization and Accessibility
98
+
99
+ The dataset is organized in a standardized HDF5 format with:
100
+
101
+ - **Hierarchical Structure**: Train/validation/test splits with consistent naming conventions
102
+ - **Parameter Encoding**: Filenames encode parameter values for easy parsing
103
+ - **Multi-Channel Support**: 2D systems support multiple solution channels (velocity components, chemical species)
104
+ - **Grid Information**: Complete spatial and temporal coordinate information
105
+ - **Normalization Statistics**: Pre-computed parameter normalization for consistent preprocessing
106
+
107
+ ### Key Features for Inverse Problem Benchmarking
108
+
109
+ 1. **Multi-Physics Coverage**: Spans chemical, fluid, wave, and porous media physics
110
+ 2. **Resolution Scalability**: Enables studies on resolution-dependent model behavior
111
+ 3. **Parameter Diversity**: Systematic exploration of parameter spaces across physical regimes
112
+ 4. **Generalization Testing**: Built-in evaluation framework for out-of-distribution performance
113
+ 5. **Computational Efficiency**: Optimized data loading and preprocessing pipelines
114
+ 6. **Reproducibility**: Complete documentation of generation parameters and solver configurations
115
+
116
+ This comprehensive dataset provides researchers with a unified platform for developing and evaluating inverse problem solving methods across diverse scientific domains, enabling systematic comparison of approaches and identification of fundamental limitations in current methodologies.
117
+
118
+ ### 3.1 Data Format
119
+
120
+ All datasets are stored in HDF5 format with specific structure depending on the PDE system.
121
+
122
+ #### Directory Structure
123
+
124
+ Datasets should be organized in the following directory structure:
125
+
126
+ ```
127
+ /path/to/data/
128
+ ├── train/
129
+ │ ├── param_file_1.h5
130
+ │ ├── param_file_2.h5
131
+ │ └── ...
132
+ ├── validation/
133
+ │ ├── param_file_3.h5
134
+ │ └── ...
135
+ └── test/
136
+ ├── param_file_4.h5
137
+ └── ...
138
+ ```
139
+
140
+ ### 3.2 Parameter Extraction from Filenames
141
+
142
+ Parameters are extracted from filenames using pattern matching (see the sketch after the examples below). For example:
143
+
144
+ - **2D Reaction Diffusion**: `Du=0.1_Dv=0.2_k=0.05.h5`
145
+ - Du = 0.1, Dv = 0.2, k = 0.05
146
+
147
+ - **2D Navier Stokes**: `83.0.h5`
148
+ - Reynolds number = 83.0
149
+
150
+ - **1D KdV**: `delta=3.5_ic=42.h5`
151
+ - δ = 3.5
152
+
153
+ ### 3.3 Working with High-Resolution Data
154
+
155
+ For high-resolution datasets, we provide configurations for downsampling:
156
+
157
+ | PDE System | Original Resolution | High-Resolution |
158
+ |------------|:-------------------:|:---------------:|
159
+ | 2D Reaction Diffusion | 128×128 | 256×256, 512×512 |
160
+ | 2D Navier Stokes | 64×64 | 128×128, 256×256 |
161
+ | 2D Turbulent Flow | 256×256 | 512×512, 1024×1024 |
162
+ | Darcy Flow | 241×241 | 421×421 |
163
+
164
+ When working with high-resolution data, set the following parameters:
165
+
166
+ ```bash
167
+ high_resolution=True
168
+ data.downsample_factor=4 # e.g., for 512×512 → 128×128
169
+ data.batch_size=2 # Reduce batch size for GPU memory
170
+ ```
171
+
172
+ ### 3.4 Data Loading Parameters
173
+
174
+ Key parameters for loading data:
175
+
176
+ - `data.every_nth_window`: Controls sampling frequency of time windows
177
+ - `data.frac_ics_per_param`: Fraction of initial conditions per parameter to use
178
+ - `data.frac_param_combinations`: Fraction of parameter combinations to use
179
+ - `data.train_window_end_percent`: Percentage of trajectory used for training
180
+ - `data.test_window_start_percent`: Percentage where test window starts
181
+
182
+ ### 3.5 Parameter Normalization
183
+
184
+ Parameters are normalized using the following statistics, where the mean and standard deviation are computed over the range of parameter values present in the dataset:
185
+
186
+ ```python
187
+ PARAM_NORMALIZATION_STATS = {
188
+ PDE.ReactionDiffusion2D: {
189
+ "k": (0.06391126306498819, 0.029533048151465856), # (mean, std)
190
+ "Du": (0.3094992685910578, 0.13865605073673604), # (mean, std)
191
+ "Dv": (0.259514500345804, 0.11541850276902947), # (mean, std)
192
+ },
193
+ PDE.NavierStokes2D: {"re": (1723.425, 1723.425)}, # (mean, std)
194
+ PDE.TurbulentFlow2D: {"nu": (0.001372469573118451, 0.002146258280849241)},
195
+ PDE.KortewegDeVries1D: {"delta": (2.899999997019768, 1.2246211546444339)},
196
+ # Add more as needed
197
+ }
198
+ ```
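+
+ These statistics are applied as a standard z-score transform; a minimal sketch is shown below (the exact call sites live in `pdeinvbench/data`, so this is illustrative):
+
+ ```python
+ # Minimal sketch: z-score normalization of a PDE parameter using the stats above.
+ def normalize_param(value: float, mean: float, std: float) -> float:
+     return (value - mean) / std
+
+ def denormalize_param(z: float, mean: float, std: float) -> float:
+     return z * std + mean
+
+ # KdV delta stats from the table above
+ mean, std = 2.899999997019768, 1.2246211546444339
+ z = normalize_param(3.5, mean, std)
+ print(z, denormalize_param(z, mean, std))  # ~0.49 and 3.5
+ ```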
199
+
200
+ ## 4. Datasets
201
+
202
+ This section provides detailed information about each PDE system in the dataset. Each subsection includes visualizations, descriptions, and technical specifications.
203
+
204
+ ### 4a. 2D Reaction Diffusion
205
+
206
+ <img src="images/2drd_u_channel.png" alt="2DRD-Activator" width="400">
207
+ <img src="images/2drd_v_channel.png" alt="2DRD-Inhibitor" width="400">
208
+
209
+ **Description:** The 2D Reaction-Diffusion system models chemical reactions with spatial diffusion using the Fitzhugh-Nagumo equations. This dataset contains two-channel solutions (activator u and inhibitor v) with parameters k (threshold for excitement), Du (activator diffusivity), and Dv (inhibitor diffusivity). The system exhibits complex pattern formation including spots, stripes, and labyrinthine structures, spanning from dissipative to Turing bifurcations.
210
+
211
+ **Mathematical Formulation:**
212
+ The activator u and inhibitor v coupled system follows:
213
+
214
+ ```
215
+ ∂tu = Du∂xxu + Du∂yyu + Ru
216
+ ∂tv = Dv∂xxv + Dv∂yyv + Rv
217
+ ```
218
+
219
+ where Ru and Rv are defined by the Fitzhugh-Nagumo equations:
220
+
221
+ ```
222
+ Ru(u,v) = u - u³ - k - v
223
+ Rv(u,v) = u - v
224
+ ```
225
+
226
+ **Parameters of Interest:**
227
+ - **Du**: Activator diffusion coefficient
228
+ - **Dv**: Inhibitor diffusion coefficient
229
+ - **k**: Threshold for excitement
230
+
231
+ **Data Characteristics:**
232
+ - Partial Derivatives: 5
233
+ - Time-dependent: Yes (parabolic)
234
+ - Spatial Resolutions: 64×64, 128×128, 256×256
235
+ - Parameters: k ∈ [0.005,0.1], Du ∈ [0.01,0.5], Dv ∈ [0.01,0.5]
236
+ - Temporal Resolution: 0.049/5 seconds
237
+ - Parameter Values: k - 28, Du - 28, Dv - 27
238
+ - Initial Conditions/Trajectories: 5
239
+
240
+ **Evaluation Splits:**
241
+ - **Test (ID)**: k ∈ [0.01,0.04] ∪ [0.08,0.09], Du ∈ [0.08,0.2] ∪ [0.4,0.49], Dv ∈ [0.08,0.2] ∪ [0.4,0.49]
242
+ - **OOD (Non-Extreme)**: k ∈ [0.04,0.08], Du ∈ [0.2,0.4], Dv ∈ [0.2,0.4]
243
+ - **OOD (Extreme)**: k ∈ [0.001,0.01] ∪ [0.09,0.1], Du ∈ [0.02,0.08] ∪ [0.49,0.5], Dv ∈ [0.02,0.08] ∪ [0.49,0.5]
244
+
245
+ **Generation Parameters:**
246
+ - **Solver**: Explicit Runge-Kutta method of order 5(4) (RK45)
247
+ - **Error Tolerance**: Relative error tolerance of 10⁻⁶
248
+ - **Spatial Discretization**: Finite Volume Method (FVM) with uniform 128×128 grid
249
+ - **Domain**: [-1,1] × [-1,1] with cell size Δx = Δy = 0.015625
250
+ - **Burn-in Period**: 1 simulation second
251
+ - **Dataset Simulation Time**: [0,5] seconds, 101 time steps
252
+ - **Nominal Time Step**: Δt ≈ 0.05 seconds (adaptive)
253
+ - **Generation Time**: ≈ 1 week on CPU
254
+
255
+ **File Structure:**
256
+ ```
257
+ filename: Du=0.1_Dv=0.2_k=0.05.h5
258
+ ```
259
+ Contents:
260
+ - `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
261
+ - `0001/grid/x`: x-coordinate grid points
262
+ - `0001/grid/y`: y-coordinate grid points
263
+ - `0001/grid/t`: Time points
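+
+ For reference, a minimal `h5py` sketch for reading one trajectory in this layout (assumes `h5py` is installed; shapes follow the contents list above):
+
+ ```python
+ # Minimal sketch: read one trajectory and its grid from a 2D reaction-diffusion file.
+ import h5py
+
+ with h5py.File("Du=0.1_Dv=0.2_k=0.05.h5", "r") as f:
+     traj = f["0001/data"][:]   # [time, x, y, channels], e.g. (101, 128, 128, 2)
+     x = f["0001/grid/x"][:]
+     y = f["0001/grid/y"][:]
+     t = f["0001/grid/t"][:]
+
+ print(traj.shape, x.shape, y.shape, t.shape)
+ ```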
264
+
265
+ ### 4b. 2D Navier Stokes (Unforced)
266
+
267
+ <img src="images/2dns.png" alt="2DNS" width="400">
268
+
269
+ **Description:** The 2D Navier-Stokes equations describe incompressible fluid flow without external forcing. This dataset contains velocity field solutions with varying Reynolds numbers, showcasing different flow regimes from laminar to transitional flows.
270
+
271
+ **Mathematical Formulation:**
272
+ We consider the vorticity form of the unforced Navier-Stokes equations:
273
+
274
+ ```
275
+ ∂w(t,x,y)/∂t + u(t,x,y)·∇w(t,x,y) = νΔw(t,x,y)
276
+ ```
277
+
278
+ for t ∈ [0,T] and (x,y) ∈ (0,1)², with auxiliary conditions:
279
+ - w = ∇ × u
280
+ - ∇ · u = 0
281
+ - w(0,x,y) = w₀(x,y) (initial condition)
282
+
283
+ **Parameters of Interest:**
284
+ - **ν**: The physical parameter of interest, representing viscosity
285
+
286
+ **Data Characteristics:**
287
+ - Partial Derivatives: 3
288
+ - Time-dependent: Yes (parabolic)
289
+ - Spatial Resolutions: 64×64, 128×128, 256×256
290
+ - Parameters: ν ∈ [10⁻⁴,10⁻²] (Reynolds: 80-8000)
291
+ - Temporal Resolution: 0.0468/3 seconds
292
+ - Parameter Values: 101
293
+ - Initial Conditions/Trajectories: 192
294
+
295
+ **Evaluation Splits:**
296
+ - **Test (ID)**: ν ∈ [10⁻³·⁸, 10⁻³·²] ∪ [10⁻²·⁸, 10⁻²·²]
297
+ - **OOD (Non-Extreme)**: ν ∈ [10⁻³·², 10⁻²·⁸]
298
+ - **OOD (Extreme)**: ν ∈ [10⁻⁴, 10⁻³·⁸] ∪ [10⁻²·², 10⁻²]
299
+
300
+ **Generation Parameters:**
301
+ - **Solver**: Pseudo-spectral solver with Crank-Nicolson time-stepping
302
+ - **Implementation**: Written in Jax and GPU-accelerated
303
+ - **Generation Time**: ≈ 3.5 GPU days (batch size=32)
304
+ - **Burn-in Period**: 15 simulation seconds
305
+ - **Saved Data**: Next 3 simulation seconds saved as dataset
306
+ - **Initial Conditions**: Sampled according to Gaussian random field (length scale=0.8)
307
+ - **Recording**: Solution recorded every 1 simulation second
308
+ - **Simulation dt**: 1e-4
309
+ - **Resolution**: 256×256
310
+
311
+ **File Structure:**
312
+ ```
313
+ filename: 83.0.h5
314
+ ```
315
+ Contents:
316
+ - `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
317
+ - `0001/grid/x`: x-coordinate grid points
318
+ - `0001/grid/y`: y-coordinate grid points
319
+ - `0001/grid/t`: Time points
320
+
321
+ ### 4c. 2D Turbulent Flow (Forced Navier Stokes)
322
+
323
+ <img src="images/2dtf.png" alt="2DTF" width="400">
324
+
325
+ **Description:** The 2D Turbulent Flow dataset represents forced Navier-Stokes equations that generate fully developed turbulent flows. This dataset is particularly valuable for studying complex, multi-scale fluid dynamics and turbulent phenomena. All solutions exhibit turbulence across various Reynolds numbers.
326
+
327
+ **Mathematical Formulation:**
328
+ The forced Navier-Stokes equations with the Kolmogorov forcing function are similar to the unforced case with an additional forcing term:
329
+
330
+ ```
331
+ ∂ₜw + u·∇w = νΔw + f(k,y) - αw
332
+ ```
333
+
334
+ where the forcing function f(k,y) is defined as:
335
+ ```
336
+ f(k,y) = -k cos(ky)
337
+ ```
338
+
339
+ **Parameters of Interest:**
340
+ - **ν**: Kinematic viscosity (similar to unforced NS)
341
+ - **α**: Drag coefficient (fixed at α = 0.1)
342
+ - **k**: Forced wavenumber (fixed at k = 2)
343
+
344
+ The drag coefficient α acts as a drag term that keeps the total energy of the system approximately constant. The prediction task is to recover ν.
345
+
346
+ **Data Characteristics:**
347
+ - Partial Derivatives: 3
348
+ - Time-dependent: Yes (parabolic)
349
+ - Spatial Resolutions: 256×256, 512×512, 1024×1024
350
+ - Parameters: ν ∈ [10⁻⁵,10⁻²]
351
+ - Temporal Resolution: 0.23/14.75 seconds
352
+ - Parameter Values: 120
353
+ - Initial Conditions/Trajectories: 108
354
+
355
+ **Evaluation Splits:**
356
+ - **Test (ID)**: ν ∈ [10⁻⁴·⁷, 10⁻³·⁸] ∪ [10⁻³·², 10⁻²·³]
357
+ - **OOD (Non-Extreme)**: ν ∈ [10⁻³·⁸, 10⁻³·²]
358
+ - **OOD (Extreme)**: ν ∈ [10⁻⁵, 10⁻⁴·⁷] ∪ [10⁻²·³, 10⁻²]
359
+
360
+ **Generation Parameters:**
361
+ - **Solver**: Pseudo-spectral solver with Crank-Nicolson time-stepping
362
+ - **Implementation**: Written in Jax (leveraging Jax-CFD), similar to 2D NS
363
+ - **Generation Time**: ≈ 4 GPU days (A100)
364
+ - **Burn-in Period**: 40 simulation seconds
365
+ - **Saved Data**: Next 15 simulation seconds saved as dataset
366
+ - **Simulator Resolution**: 256×256
367
+ - **Downsampling**: Downsamples to 64×64 before saving
368
+ - **Temporal Resolution (Saved)**: ∂t = 0.25 simulation seconds
369
+
370
+ **File Structure:**
371
+ ```
372
+ filename: nu=0.001.h5
373
+ ```
374
+ Contents:
375
+ - `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
376
+ - `0001/grid/x`: x-coordinate grid points
377
+ - `0001/grid/y`: y-coordinate grid points
378
+ - `0001/grid/t`: Time points
379
+
380
+ ### 4d. 1D Korteweg-De Vries
381
+
382
+ <img src="images/1dkdv.png" alt="KdV" width="400">
383
+
384
+ **Description:** The Korteweg-De Vries (KdV) equation is a nonlinear partial differential equation that describes shallow water waves and solitons. This 1D dataset contains soliton solutions with varying dispersion parameters, demonstrating wave propagation and interaction phenomena.
385
+
386
+ **Mathematical Formulation:**
387
+ KdV is a 1D PDE representing waves on a shallow-water surface. The governing equation follows the form:
388
+
389
+ ```
390
+ 0 = ∂ₜu + u·∂ₓu + δ²∂ₓₓₓu
391
+ ```
392
+
393
+ **Parameters of Interest:**
394
+ - **δ**: The physical parameter representing the strength of the dispersive effect on the system
395
+ - In shallow water wave theory, δ is a unit-less quantity roughly indicating the relative depth of the water
396
+
397
+ **Data Characteristics:**
398
+ - Partial Derivatives: 3
399
+ - Time-dependent: Yes (hyperbolic)
400
+ - Spatial Resolution: 256
401
+ - Parameters: δ ∈ [0.8,5]
402
+ - Temporal Resolution: 0.73/102 seconds
403
+ - Parameter Values: 100
404
+ - Initial Conditions/Trajectories: 100
405
+
406
+ **Evaluation Splits:**
407
+ - **Test (ID)**: δ ∈ [1.22, 2.48] ∪ [3.32, 4.58]
408
+ - **OOD (Non-Extreme)**: δ ∈ [2.48, 3.32]
409
+ - **OOD (Extreme)**: δ ∈ [0.8, 1.22] ∪ [4.58, 5]
410
+
411
+ **Generation Parameters:**
412
+ - **Domain**: Periodic domain [0,L]
413
+ - **Spatial Discretization**: Pseudospectral method with Fourier basis (Nₓ = 256 grid points)
414
+ - **Time Integration**: Implicit Runge-Kutta method (Radau IIA, order 5)
415
+ - **Implementation**: SciPy's `solve_ivp` on CPU
416
+ - **Generation Time**: ≈ 12 hours
417
+ - **Burn-in Period**: 40 simulation seconds
418
+
419
+ **Initial Conditions:**
420
+ Initial conditions are sampled from a distribution over a truncated Fourier Series:
421
+
422
+ ```
423
+ u₀(x) = Σ_{k=1}^K A_k sin(2πl_k x/L + φ_k)
424
+ ```
425
+
426
+ where:
427
+ - A_k, φ_k ~ U(0,1)
428
+ - l_k ~ U(1,3)
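+
+ A minimal sampling sketch of this distribution (the number of modes K and the domain length L are not specified in this guide, so the values below are assumptions for illustration):
+
+ ```python
+ # Minimal sketch: sample a KdV initial condition from the truncated Fourier series above.
+ import numpy as np
+
+ def sample_kdv_ic(x: np.ndarray, L: float, K: int = 10, rng=None) -> np.ndarray:
+     rng = np.random.default_rng() if rng is None else rng
+     A = rng.uniform(0.0, 1.0, size=K)    # amplitudes A_k ~ U(0,1)
+     phi = rng.uniform(0.0, 1.0, size=K)  # phases phi_k ~ U(0,1), as stated above
+     l = rng.uniform(1.0, 3.0, size=K)    # wavenumbers l_k ~ U(1,3)
+     return sum(A[k] * np.sin(2 * np.pi * l[k] * x / L + phi[k]) for k in range(K))
+
+ L = 2 * np.pi                                 # assumed domain length
+ x = np.linspace(0.0, L, 256, endpoint=False)  # N_x = 256 grid points, periodic domain
+ u0 = sample_kdv_ic(x, L)
+ print(u0.shape)  # (256,)
+ ```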
429
+
430
+ **File Structure:**
431
+ ```
432
+ filename: delta=3.5_ic=42.h5
433
+ ```
434
+ Contents:
435
+ - `tensor`: Solution field with shape [time, spatial_dim]
436
+ - `x-coordinate`: Spatial grid points
437
+ - `t-coordinate`: Time points
438
+
439
+ ### 4e. 2D Darcy Flow
440
+
441
+ <img src="images/2ddf.png" alt="2DDF" width="400">
442
+
443
+ **Description:** The 2D Darcy Flow dataset represents steady-state flow through porous media with piecewise constant diffusion coefficients. This time-independent system is commonly used in groundwater flow modeling and subsurface transport problems. All solutions converge to a non-trivial steady-state solution based on the diffusion coefficient field.
444
+
445
+ **Mathematical Formulation:**
446
+ The 2D steady-state Darcy flow equation on a unit box Ω = (0,1)² is a second-order linear elliptic PDE with Dirichlet boundary conditions:
447
+
448
+ ```
449
+ -∇·(a(x)∇u(x)) = f(x), for x ∈ Ω
450
+ u(x) = 0, for x ∈ ∂Ω
451
+ ```
452
+
453
+ where:
454
+ - a ∈ L∞((0,1)²;R⁺) is a piecewise constant diffusion coefficient
455
+ - u(x) is the pressure field
456
+ - f(x) = 1 is a fixed forcing function
457
+
458
+ **Parameters of Interest:**
459
+ - **a(x)**: Piecewise constant diffusion coefficient field (spatially varying parameter)
460
+
461
+ **Data Characteristics:**
462
+ - Partial Derivatives: 2
463
+ - Time-dependent: No (elliptic)
464
+ - Spatial Resolutions: 241×241, 421×421
465
+ - Parameters: Piecewise constant diffusion coefficient a ∈ L∞((0,1)²;R⁺)
466
+ - Temporal Resolution: N/A (steady-state)
467
+ - Parameter Values: 2048
468
+ - Initial Conditions/Trajectories: N/A
469
+
470
+ **Evaluation Splits:**
471
+
472
+ Unlike time-dependent systems with scalar parameters, Darcy Flow does not admit parameter splits based on numeric ranges. Instead, splits are defined using a derived statistic of the coefficient field.
473
+
474
+ Let *r(a)* denote the fraction of grid points in the coefficient field *a(x)* that take the maximum value (12).
475
+ This statistic is approximately normally distributed across coefficient fields.
476
+
477
+ Splits are defined as:
478
+
479
+ - **Test (ID):** Coefficient fields whose *r(a)* lies within the central mass of the distribution
480
+ - **OOD (Non-Extreme):** Not applicable
481
+ - **OOD (Extreme):** Coefficient fields whose *r(a)* lies in the tails beyond ±1.5σ
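+
+ A minimal sketch of this statistic (the dataset-wide mean and standard deviation of r(a) are not listed here and would have to be estimated from the coefficient fields):
+
+ ```python
+ # Minimal sketch: the split statistic r(a) and the ±1.5σ extreme-tail test described above.
+ import numpy as np
+
+ def r_statistic(a: np.ndarray) -> float:
+     """Fraction of grid points in the coefficient field equal to the maximum value 12."""
+     return float(np.mean(np.isclose(a, 12.0)))
+
+ def is_extreme(a: np.ndarray, r_mean: float, r_std: float) -> bool:
+     return abs(r_statistic(a) - r_mean) > 1.5 * r_std
+ ```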
482
+
483
+
484
+ **Generation Parameters:**
485
+ - **Solver**: Second-order finite difference method
486
+ - **Implementation**: Originally written in Matlab, runs on CPU
487
+ - **Resolution**: 421×421 (original), with lower resolution dataset generated by downsampling
488
+ - **Coefficient Field Sampling**: a(x) is sampled from μ = Γ(N(0, -Δ + 9I)⁻²)
489
+ - **Gamma Mapping**: Element-wise map where a_i ~ N(0, -Δ + 9I)⁻² → {3,12}
490
+ - a_i → 12 when a_i ≥ 0
491
+ - a_i → 3 when a_i < 0
492
+ - **Boundary Conditions**: Zero Neumann boundary conditions on the Laplacian over the coefficient field
493
+
494
+ **File Structure:**
495
+ ```
496
+ filename: sample_1024.h5
497
+ ```
498
+ Contents:
499
+ - `coeff`: Piecewise constant coefficient field
500
+ - `sol`: Solution field
501
+
502
+
503
+ ## 5. Adding a New Dataset
504
+
505
+ The PDEInvBench framework is designed to be modular, allowing you to add new PDE systems. This section describes how to add a new dataset to the repository. For information about data format requirements, see [Section 3.1](#31-data-format).
506
+
507
+ ### Table of Contents
508
+ - [Step 1: Add PDE Type to Utils](#step-1-add-pde-type-to-utils)
509
+ - [Step 2: Add PDE Attributes](#step-2-add-pde-attributes)
510
+ - [Step 3: Add Parameter Normalization Stats](#step-3-add-parameter-normalization-stats)
511
+ - [Step 4: Add Parameter Extraction Logic](#step-4-add-parameter-extraction-logic)
512
+ - [Step 5: Create a Dataset Handler](#step-5-create-a-dataset-handler-if-needed)
513
+ - [Step 6: Create System Configuration](#step-6-create-system-configuration)
514
+ - [Step 7: Add Residual Functions](#step-7-add-residual-functions)
515
+ - [Step 8: Create a Combined Configuration](#step-8-create-a-combined-configuration)
516
+ - [Step 9: Generate and Prepare Data](#step-9-generate-and-prepare-data)
517
+ - [Step 10: Run Experiments](#step-10-run-experiments)
518
+ - [Data Format Requirements](#data-format-requirements)
519
+
520
+ ### Step 1: Add PDE Type to Utils
521
+
522
+ First, add your new PDE system to `pdeinvbench/utils/types.py`:
523
+
524
+ ```python
525
+ class PDE(enum.Enum):
526
+ """
527
+ Describes which PDE system is currently being used.
528
+ """
529
+ # Existing PDEs...
530
+ ReactionDiffusion1D = "Reaction Diffusion 1D"
531
+ ReactionDiffusion2D = "Reaction Diffusion 2D"
532
+ NavierStokes2D = "Navier Stokes 2D"
533
+ # Add your new PDE
534
+ YourNewPDE = "Your New PDE Description"
535
+ ```
536
+
537
+ ### Step 2: Add PDE Attributes
538
+
539
+ Update the attribute dictionaries in `pdeinvbench/utils/types.py` with information about your new PDE:
540
+
541
+ ```python
542
+ # Number of partial derivatives
543
+ PDE_PARTIALS = {
544
+ # Existing PDEs...
545
+ PDE.YourNewPDE: 3, # Number of partial derivatives needed
546
+ }
547
+
548
+ # Number of spatial dimensions
549
+ PDE_NUM_SPATIAL = {
550
+ # Existing PDEs...
551
+ PDE.YourNewPDE: 2, # 1 for 1D PDEs, 2 for 2D PDEs
552
+ }
553
+
554
+ # Spatial size of the grid
555
+ PDE_SPATIAL_SIZE = {
556
+ # Existing PDEs...
557
+ PDE.YourNewPDE: [128, 128], # Spatial dimensions of your dataset
558
+ }
559
+
560
+ # High-resolution spatial size (if applicable)
561
+ HIGH_RESOLUTION_PDE_SPATIAL_SIZE = {
562
+ # Existing PDEs...
563
+ PDE.YourNewPDE: [512, 512], # High-res dimensions
564
+ }
565
+
566
+ # Number of parameters
567
+ PDE_NUM_PARAMETERS = {
568
+ # Existing PDEs...
569
+ PDE.YourNewPDE: 2, # Number of parameters in your PDE
570
+ }
571
+
572
+ # Parameter values
573
+ PDE_PARAM_VALUES = {
574
+ # Existing PDEs...
575
+ PDE.YourNewPDE: {
576
+ "param1": [0.1, 0.2, 0.3], # List of possible values for param1
577
+ "param2": [1.0, 2.0, 3.0], # List of possible values for param2
578
+ },
579
+ }
580
+
581
+ # Number of data channels
582
+ PDE_NUM_CHANNELS = {
583
+ # Existing PDEs...
584
+ PDE.YourNewPDE: 2, # Number of channels in your solution field
585
+ }
586
+
587
+ # Number of timesteps in the trajectory
588
+ PDE_TRAJ_LEN = {
589
+ # Existing PDEs...
590
+ PDE.YourNewPDE: 100, # Number of timesteps in your trajectories
591
+ }
592
+ ```
593
+
594
+ ### Step 3: Add Parameter Normalization Stats
595
+
596
+ Update `pdeinvbench/data/utils.py` with normalization statistics for your PDE parameters:
597
+
598
+ ```python
599
+ PARAM_NORMALIZATION_STATS = {
600
+ # Existing PDEs...
601
+ PDE.YourNewPDE: {
602
+ "param1": (0.2, 0.05), # (mean, std) for param1
603
+ "param2": (2.0, 0.5), # (mean, std) for param2
604
+ },
605
+ }
606
+ ```
607
+
608
+ ### Step 4: Add Parameter Extraction Logic
609
+
610
+ Add logic to extract parameters from your dataset files in `extract_params_from_path` function inside the dataset class:
611
+
612
+ ```python
613
+ def extract_params_from_path(path: str, pde: PDE) -> dict:
614
+ # Existing code...
615
+ elif pde == PDE.YourNewPDE:
616
+ # Parse the filename to extract parameters
617
+ name = os.path.basename(path)
618
+ # Example: extract parameters from filename format "param1=X_param2=Y.h5"
619
+ param1 = torch.Tensor([float(name.split("param1=")[1].split("_")[0])])
620
+ param2 = torch.Tensor([float(name.split("param2=")[1].split(".h5")[0])])
621
+ param_dict = {"param1": param1, "param2": param2}
622
+ # Existing code...
623
+ return param_dict
624
+ ```
625
+
626
+ ### Step 5: Create a Dataset Handler (if needed)
627
+
628
+ If your PDE requires special handling beyond what `PDE_MultiParam` provides, create a new dataset class in `pdeinvbench/data/`:
629
+
630
+ ```python
631
+ # Example: pdeinvbench/data/your_new_pde_dataset.py
632
+ import torch
633
+ from torch.utils.data import Dataset
634
+
635
+ class YourNewPDEDataset(Dataset):
636
+ """
637
+ Custom dataset class for your new PDE system.
638
+ """
639
+ def __init__(
640
+ self,
641
+ data_root: str,
642
+ pde: PDE,
643
+ n_past: int,
644
+ n_future: int,
645
+ mode: str,
646
+ train: bool,
647
+ # Other parameters...
648
+ ):
649
+ # Initialization code...
650
+ pass
651
+
652
+ def __len__(self):
653
+ # Implementation...
654
+ pass
655
+
656
+ def __getitem__(self, index: int):
657
+ # Implementation...
658
+ pass
659
+ ```
660
+
661
+ Add your new dataset to `pdeinvbench/data/__init__.py`:
662
+
663
+ ```python
664
+ from .pde_multiparam import PDE_MultiParam
665
+ from .your_new_pde_dataset import YourNewPDEDataset
666
+
667
+ __all__ = ["PDE_MultiParam", "YourNewPDEDataset"]
668
+ ```
669
+
670
671
+ ### Step 6: Create System Configuration
672
+
673
+ Create `configs/system_params/your_new_pde.yaml`:
674
+
675
+ ```yaml
676
+ # configs/system_params/your_new_pde.yaml
677
+ defaults:
678
+ - base
679
+
680
+ # ============ Data Parameters ============
681
+ name: "your_new_pde_inverse"
682
+ data_root: "/path/to/your/data"
683
+ pde_name: "Your New PDE Description" # Must match PDE enum value
684
+ num_channels: 2 # Number of solution channels (e.g., u and v)
685
+ cutoff_first_n_frames: 0 # How many initial frames to skip
686
+
687
+ # ============ Model Parameters ============
688
+ downsampler_input_dim: 2 # 1 for 1D systems, 2 for 2D systems
689
+ params_to_predict: ["param1", "param2"] # What parameters to predict
690
+ normalize: True # Whether to normalize predicted parameters
691
+ ```
692
+
693
+ Then create the top-level config `configs/your_new_pde.yaml`:
694
+
695
+ ```yaml
696
+ # configs/your_new_pde.yaml
697
+ name: your_new_pde
698
+ defaults:
699
+ - _self_
700
+ - base
701
+ - override system_params: your_new_pde
702
+ ```
703
+
704
+ The existing `configs/data/base.yaml` automatically references `${system_params.*}`, so data loading works out of the box. Run experiments with:
705
+
706
+
707
+ ```bash
708
+ python train_inverse.py --config-name=your_new_pde
709
+ python train_inverse.py --config-name=your_new_pde model=fno
710
+ python train_inverse.py --config-name=your_new_pde model=resnet
711
+ ```
712
+
713
+ ### Step 7: Add Residual Functions
714
+
715
+ Implement residual functions for your PDE in `pdeinvbench/losses/pde_residuals.py`:
716
+
717
+ ```python
718
+ def your_new_pde_residual(
719
+ sol: torch.Tensor,
720
+ params: Dict[str, torch.Tensor],
721
+ spatial_grid: Tuple[torch.Tensor, ...],
722
+ t: torch.Tensor,
723
+ return_partials: bool = False,
724
+ ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
725
+ """
726
+ Compute the residual for your new PDE.
727
+
728
+ Args:
729
+ sol: Solution field
730
+ params: Dictionary of PDE parameters
731
+ spatial_grid: Spatial grid coordinates
732
+ t: Time coordinates
733
+ return_partials: Whether to return partial derivatives
734
+
735
+ Returns:
736
+ Residual tensor or (residual, partials) if return_partials=True
737
+ """
738
+ # Implementation...
739
+ pass
740
+ ```
741
+
742
+ Register your residual function in `get_pde_residual_function`:
743
+
744
+ ```python
745
+ def get_pde_residual_function(pde: PDE) -> Callable:
746
+ """Return the appropriate residual function for the given PDE."""
747
+ if pde == PDE.ReactionDiffusion2D:
748
+ return reaction_diffusion_2d_residual
749
+ # Add your PDE
750
+ elif pde == PDE.YourNewPDE:
751
+ return your_new_pde_residual
752
+ # Other PDEs...
753
+ else:
754
+ raise ValueError(f"Unknown PDE type: {pde}")
755
+ ```
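+
+ As a concrete illustration of this interface, below is a finite-difference sketch of the 1D KdV residual from Section 4d (0 = ∂ₜu + u∂ₓu + δ²∂ₓₓₓu). It uses a simplified signature (uniform spacings instead of explicit grids); the repository's own residual functions may use different stencils:
+
+ ```python
+ # Minimal sketch: finite-difference residual of 0 = u_t + u*u_x + delta^2 * u_xxx.
+ import torch
+
+ def kdv_residual_sketch(sol: torch.Tensor, params: dict, dx: float, dt: float) -> torch.Tensor:
+     """sol: [batch, time, x]; params["delta"]: [batch, 1]."""
+     u_t = torch.gradient(sol, spacing=dt, dim=1)[0]
+     u_x = torch.gradient(sol, spacing=dx, dim=2)[0]
+     u_xx = torch.gradient(u_x, spacing=dx, dim=2)[0]
+     u_xxx = torch.gradient(u_xx, spacing=dx, dim=2)[0]
+     delta = params["delta"].view(-1, 1, 1)
+     return u_t + sol * u_x + delta**2 * u_xxx
+ ```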
756
+
757
+ ### Step 8: Create a Combined Configuration
758
+
759
+ Create a combined configuration that uses your dataset:
760
+
761
+ ```yaml
762
+ # configs/your_new_pde.yaml
763
+ name: "your_new_pde"
764
+ defaults:
765
+ - _self_
766
+ - base
767
+ - override data: your_new_pde
768
+ ```
769
+
770
+ ### Step 9: Generate and Prepare Data
771
+
772
+ Make sure your data is properly formatted and stored in the expected directory structure:
773
+
774
+ ```
775
+ /path/to/your/data/
776
+ ├── train/
777
+ │ ├── param1=0.1_param2=1.0.h5
778
+ │ ├── param1=0.2_param2=2.0.h5
779
+ │ └── ...
780
+ ├── validation/
781
+ │ ├── param1=0.15_param2=1.5.h5
782
+ │ └── ...
783
+ └── test/
784
+ ├── param1=0.25_param2=2.5.h5
785
+ └── ...
786
+ ```
787
+
788
+ Each HDF5 file should contain:
789
+ - Solution trajectories
790
+ - Grid information (x, y, t)
791
+ - Any other metadata needed for your PDE
792
+
793
+ ### Step 10: Run Experiments
794
+
795
+ You can now run experiments with your new dataset:
796
+
797
+ ```bash
798
+ python train_inverse.py --config-name=your_new_pde
799
+ ```
800
+
801
+ ### Data Format Requirements
802
+
803
+ The primary dataset class `PDE_MultiParam` expects data in HDF5 format with specific structure:
804
+
805
+ - **1D PDEs**: Each HDF5 file contains a single trajectory with keys:
806
+ - `tensor`: The solution field with shape `[time, spatial_dim]`
807
+ - `x-coordinate`: Spatial grid points
808
+ - `t-coordinate`: Time points
809
+
810
+ - **2D PDEs**: Each HDF5 file contains multiple trajectories (one per IC):
811
+ - `0001/data`: Solution field with shape `[time, spatial_dim_1, spatial_dim_2, channels]`
812
+ - `0001/grid/x`: x-coordinates
813
+ - `0001/grid/y`: y-coordinates
814
+ - `0001/grid/t`: Time points
815
+
816
+ - **File naming**: The filename should encode the PDE parameters, following the format expected by `extract_params_from_path`
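+
+ A minimal sanity-check sketch for a new dataset file against these two layouts (illustrative only; not part of the package):
+
+ ```python
+ # Minimal sketch: print the keys and shapes of a 1D- or 2D-format HDF5 file.
+ import h5py
+
+ def describe_file(path: str) -> None:
+     with h5py.File(path, "r") as f:
+         if "tensor" in f:  # 1D layout
+             print({k: f[k].shape for k in ("tensor", "x-coordinate", "t-coordinate")})
+         else:              # 2D layout: one group per initial condition
+             for ic in f.keys():
+                 print(ic, f[f"{ic}/data"].shape)
+ ```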
817
+
MODEL_GUIDE.md ADDED
@@ -0,0 +1,352 @@
1
+ # PDEInvBench
2
+ ## Adding a New Model
3
+
4
+ The PDEInvBench framework is designed to be modular, allowing you to easily add new model architectures. This section describes how to add a new encoder architecture to the repository.
5
+
6
+ ## Table of Contents
7
+ - [Model Architecture Components](#model-architecture-components)
8
+ - [Adding a new model](#adding-a-new-model)
9
+ - [Step 1: Create a New Encoder Class](#step-1-create-a-new-encoder-class)
10
+ - [Step 2: Import and Register Your Model](#step-2-import-and-register-your-model)
11
+ - [Step 3: Create a Configuration File](#step-3-create-a-configuration-file)
12
+ - [Step 4: Run Experiments with Your Model](#step-4-run-experiments-with-your-model)
13
+
14
+ ## Model Architecture Components
15
+
16
+ The inverse model architecture in PDEInvBench consists of three main components:
17
+
18
+
19
+ ```
20
+ Input Solution Field → Encoder → Downsampler → Parameter Network → PDE Parameters
21
+ ```
22
+
23
+ 1. **Encoder**: Extracts features from the input solution field (e.g., FNO, ResNet, ScOT)
24
+ 2. **Downsampler**: Reduces the spatial dimensions of the features (e.g., ConvDownsampler)
25
+ 3. **Parameter Network**: Predicts PDE parameters from the downsampled features
26
+
27
+
28
+ ## Adding a new model
29
+
30
+ When creating a new model, you typically only need to modify one of these components while keeping the others the same.
31
+
32
+ ### Step 1: Create a New Encoder Class
33
+
34
+ First, create a new encoder class in `pdeinvbench/models/encoder.py`. Your new encoder should follow the interface of existing encoders like `FNOEncoder`, `ResnetEncoder`, or `SwinEncoder`:
35
+
36
+ ```python
37
+ import torch
38
+ import torch.nn as nn
39
+ from pdeinvbench.utils.types import PDE
40
+ from pdeinvbench.models.encoder import resolve_number_input_channels
41
+
42
+ class YourEncoder(nn.Module):
43
+ """
44
+ Your custom encoder for PDE inverse problems.
45
+ """
46
+
47
+ def __init__(
48
+ self,
49
+ n_modes: int, # Or equivalent parameter for your architecture
50
+ n_layers: int,
51
+ n_past: int,
52
+ n_future: int,
53
+ pde: PDE,
54
+ data_channels: int,
55
+ hidden_channels: int,
56
+ use_partials: bool,
57
+ mode: str,
58
+ batch_size: int
59
+ # Add any architecture-specific parameters
60
+ ):
61
+ super(YourEncoder, self).__init__()
62
+
63
+ # Store essential parameters
64
+ self.n_past = n_past
65
+ self.n_future = n_future
66
+ self.pde = pde
67
+ self.data_channels = data_channels
68
+ self.hidden_channels = hidden_channels
69
+ self.use_partials = use_partials
70
+ self.mode = mode
71
+ self.batch_size = batch_size
72
+
73
+
74
+ # Calculate input channels similar to existing encoders
75
+ in_channels = resolve_number_input_channels(
76
+ n_past=n_past,
77
+ data_channels=data_channels,
78
+ use_partials=use_partials,
79
+ pde=pde,
80
+ )
81
+
82
+ # Define your model architecture
83
+ # Example: Custom neural network layers
84
+ self.encoder_layers = nn.ModuleList([
85
+ # Your custom layers here
86
+ nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1),
87
+ nn.ReLU(),
88
+ # Add more layers as needed
89
+ ])
90
+
91
+ # Output layer to match expected output dimensions
92
+ self.output_layer = nn.Conv2d(hidden_channels, hidden_channels, kernel_size=1)
93
+
94
+ def forward(self, x, **kwargs):
95
+ """
96
+ Forward pass of your encoder.
97
+
98
+ Args:
99
+ x: Input tensor of shape [batch, channels, height, width]
100
+ **kwargs: Additional arguments (may include 't' for time-dependent models)
101
+
102
+ Returns:
103
+ Output tensor of shape [batch, hidden_channels, height, width]
104
+ """
105
+ # Implement your forward pass
106
+ for layer in self.encoder_layers:
107
+ x = layer(x)
108
+
109
+ x = self.output_layer(x)
110
+ return x
111
+ ```
112
+
113
+ #### Creating Custom Downsamplers
114
+
115
+ If you need a custom downsampler, create it in `pdeinvbench/models/downsampler.py`:
116
+
117
+ ```python
118
+ import torch
119
+ import torch.nn as nn
120
+
121
+ class YourDownsampler(nn.Module):
122
+ """
123
+ Your custom downsampler for reducing spatial dimensions.
124
+ """
125
+
126
+ def __init__(
127
+ self,
128
+ input_dimension: int,
129
+ n_layers: int,
130
+ in_channels: int,
131
+ out_channels: int,
132
+ kernel_size: int,
133
+ stride: int,
134
+ padding: int,
135
+ dropout: float,
136
+ ):
137
+ super(YourDownsampler, self).__init__()
138
+
139
+ # Define your downsampling layers
140
+ self.layers = nn.ModuleList([
141
+ # Your custom downsampling layers here
142
+ nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
143
+ nn.ReLU(),
144
+ nn.Dropout(dropout),
145
+ ])
146
+
147
+ def forward(self, x):
148
+ """
149
+ Forward pass of your downsampler.
150
+
151
+ Args:
152
+ x: Input tensor of shape [batch, channels, height, width]
153
+
154
+ Returns:
155
+ Downsampled tensor
156
+ """
157
+ for layer in self.layers:
158
+ x = layer(x)
159
+ return x
160
+ ```
161
+
162
+ #### Creating Custom MLPs
163
+
164
+ If you need a custom MLP, create it in `pdeinvbench/models/mlp.py`:
165
+
166
+ ```python
167
+ import torch
168
+ import torch.nn as nn
169
+
170
+ class YourMLP(nn.Module):
171
+ """
172
+ Your custom MLP for parameter prediction.
173
+ """
174
+
175
+ def __init__(
176
+ self,
177
+ in_dim: int,
178
+ hidden_size: int,
179
+ dropout: float,
180
+ out_dim: int,
181
+ num_layers: int,
182
+ activation: str,
183
+ ):
184
+ super(YourMLP, self).__init__()
185
+
186
+ # Define your MLP layers
187
+ layers = []
188
+ current_dim = in_dim
189
+
190
+ for i in range(num_layers):
191
+ layers.append(nn.Linear(current_dim, hidden_size))
192
+ layers.append(nn.ReLU() if activation == "relu" else nn.Tanh())
193
+ layers.append(nn.Dropout(dropout))
194
+ current_dim = hidden_size
195
+
196
+ layers.append(nn.Linear(current_dim, out_dim))
197
+ self.layers = nn.Sequential(*layers)
198
+
199
+ def forward(self, x):
200
+ """
201
+ Forward pass of your MLP.
202
+
203
+ Args:
204
+ x: Input tensor of shape [batch, features]
205
+
206
+ Returns:
207
+ Output tensor of shape [batch, out_dim]
208
+ """
209
+ return self.layers(x)
210
+ ```
211
+
212
+ ### Step 2: Import and Register Your Model
213
+
214
+ Make sure your encoder is imported in `pdeinvbench/models/__init__.py`:
215
+
216
+ ```python
217
+ from .encoder import FNOEncoder, ResnetEncoder, ScOTEncoder, YourEncoder
218
+ ```
219
+
220
+ This makes your encoder available for use in configuration files.
221
+
222
+ ### Step 3: Create a Configuration File
223
+
224
+ The configuration system has three levels:
225
+
226
+ #### 3.1: Create Model Architecture Config
227
+
228
+ Create `configs/model/yourmodel.yaml`:
229
+
230
+ ```yaml
231
+ # configs/model/yourmodel.yaml
232
+ name: "${system_params.name}_yourmodel"
233
+ dropout: ${system_params.yourmodel_dropout}
234
+ predict_variance: False
235
+ hidden_channels: ${system_params.yourmodel_hidden_channels}
236
+ encoder_layers: ${system_params.yourmodel_encoder_layers}
237
+ downsampler_layers: ${system_params.yourmodel_downsampler_layers}
238
+ mlp_layers: ${system_params.yourmodel_mlp_layers}
239
+
240
+ model_config:
241
+ _target_: pdeinvbench.models.inverse_model.InverseModel
242
+ paramnet:
243
+ _target_: pdeinvbench.models.inverse_model.ParameterNet
244
+ pde: ${data.pde}
245
+ normalize: ${system_params.normalize}
246
+ logspace: ${system_params.logspace}
247
+ params_to_predict: ${system_params.params_to_predict}
248
+ predict_variance: ${model.predict_variance}
249
+ mlp_type: ${system_params.mlp_type}
250
+ encoder:
251
+ _target_: pdeinvbench.models.encoder.YourEncoder
252
+ n_modes: ${system_params.yourmodel_n_modes}
253
+ n_past: ${n_past}
254
+ n_future: ${n_future}
255
+ n_layers: ${model.encoder_layers}
256
+ data_channels: ${data.num_channels}
257
+ hidden_channels: ${model.hidden_channels}
258
+ use_partials: True
259
+ pde: ${data.pde}
260
+ mode: ${mode}
261
+ batch_size: ${data.batch_size}
262
+ use_cn: false
263
+ task: inverse
264
+ downsampler: ${system_params.yourmodel_downsampler}
265
+ mlp_hidden_size: ${model.hidden_channels}
266
+ mlp_layers: ${model.mlp_layers}
267
+ mlp_activation: "relu"
268
+ mlp_dropout: ${model.dropout}
269
+ downsample_factor: ${data.downsample_factor}
270
+ ```
271
+
272
+ #### 3.2: Add Defaults to `configs/system_params/base.yaml`
273
+
274
+ Add architecture defaults that work across all PDE systems:
275
+
276
+ ```yaml
277
+ # configs/system_params/base.yaml
278
+
279
+ # ============ YourModel Architecture ============
280
+ yourmodel_hidden_channels: 64
281
+ yourmodel_encoder_layers: 4
282
+ yourmodel_downsampler_layers: 4
283
+ yourmodel_dropout: 0
284
+ yourmodel_mlp_layers: 1
285
+ yourmodel_n_modes: 16
286
+
287
+ yourmodel_downsampler:
288
+ _target_: pdeinvbench.models.downsampler.ConvDownsampler
289
+ input_dimension: ${system_params.downsampler_input_dim}
290
+ n_layers: ${model.downsampler_layers}
291
+ in_channels: ${model.hidden_channels}
292
+ out_channels: ${model.hidden_channels}
293
+ kernel_size: 3
294
+ stride: 1
295
+ padding: 2
296
+ dropout: ${model.dropout}
297
+ ```
298
+
299
+ #### 3.3: (Optional) Add System-Specific Overrides
300
+
301
+ Override defaults for specific systems in `configs/system_params/{system}.yaml`:
302
+
303
+ ```yaml
304
+ # configs/system_params/2dtf.yaml
305
+ defaults:
306
+ - base
307
+
308
+ # ... existing system config ...
309
+
310
+ # Override architecture for this system
311
+ yourmodel_hidden_channels: 128 # Needs larger model
312
+ yourmodel_encoder_layers: 6
313
+ ```
314
+
315
+ **That's it!** Your model now works with all PDE systems:
316
+ ```bash
317
+ python train_inverse.py --config-name=1dkdv model=yourmodel
318
+ python train_inverse.py --config-name=2dtf model=yourmodel
319
+ ```
320
+
321
+
322
+ #### Important Notes
323
+
324
+ - **System-specific parameters** (like `params_to_predict`, `normalize`, `downsampler_input_dim`) go in `configs/system_params/{system}.yaml`
325
+ - **Architecture defaults** go in `configs/system_params/base.yaml`
326
+ - **Model structure** goes in `configs/model/{architecture}.yaml`
327
+ - For special cases like Darcy Flow, override the downsampler in the system_params file:
328
+ ```yaml
329
+ # configs/system_params/2ddf.yaml
330
+ yourmodel_downsampler:
331
+ _target_: pdeinvbench.models.downsampler.IdentityMap
332
+ ```
333
+
334
+ ### Step 4: Run Experiments with Your Model
335
+
336
+ You can now run experiments with your custom model on **any** PDE system:
337
+
338
+ ```bash
339
+ # Use your model with different PDE systems
340
+ python train_inverse.py --config-name=1dkdv model=yourmodel
341
+ python train_inverse.py --config-name=2dtf model=yourmodel
342
+ python train_inverse.py --config-name=2dns model=yourmodel
343
+
344
+ # Use model variants if you created them
345
+ python train_inverse.py --config-name=2drdk model=yourmodel_large
346
+
347
+ # Override parameters from command line
348
+ python train_inverse.py --config-name=2dtf model=yourmodel model.hidden_channels=96
349
+
350
+ # Combine multiple overrides
351
+ python train_inverse.py --config-name=2ddf model=yourmodel data.batch_size=16 model.encoder_layers=6
352
+ ```
README.md ADDED
@@ -0,0 +1,515 @@
1
+ # PDEInvBench
2
+
3
+ A one-stop-shop repository for benchmarking Neural Operators on inverse problems in partial differential equations.
4
+
5
+ <img src="images/pde_objectives_main_fig_1.png" alt="" width="400">
6
+
7
+ ## Overview
8
+
9
+ Inverse problems in partial differential equations (PDEs) involve recovering unknown physical parameters of a system—such as viscosity, diffusivity, or reaction coefficients—from observed spatiotemporal solution fields. Formally, given a PDE
10
+
11
+ $$F_{\phi}(u(x,t)) = 0,$$
12
+
13
+ where $u(x,t)$ is the solution field and $\phi$ represents physical parameters, the **forward problem** maps $\phi \mapsto u$, while the **inverse problem** seeks the reverse mapping $u \mapsto \phi$.
14
+
15
+ Inverse problems are inherently ill-posed and highly sensitive to noise, making them a challenging yet foundational task in scientific computing and engineering. They arise in diverse applications such as geophysical exploration, fluid mechanics, biomedical imaging, and materials design—where estimating hidden parameters from observed dynamics is essential.
16
+
17
+ **PDEInvBench** provides a comprehensive benchmark for inverse problems in partial differential equations (PDEs). The codebase supports multiple PDE systems, training strategies, and neural network architectures.
18
+
19
+ ## Dataset Link
20
+ The datasets used in this project can be found here:
21
+ https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main
22
+
23
+
24
+ ## Table of Contents
25
+ 1. [Overview](#overview)
26
+ 2. [Supported Systems](#supported-systems)
27
+ 3. [Supported Inverse Methods](#supported-inverse-methods)
28
+ 4. [Models Implemented](#models-implemented)
29
+ 5. [Directory Structure](#directory-structure)
30
+ 6. [Environment Setup](#environment-setup)
31
+ 7. [Downloading Data](#downloading-data)
32
+ 8. [Running Experiments](#running-experiments)
33
+ - [How Configs Work](#how-configs-work)
34
+ - [Basic Commands](#basic-commands)
35
+ - [Common Overrides](#common-overrides)
36
+ - [Multi-GPU and Distributed Training](#multi-gpu-and-distributed-training)
37
+ - [Experiment Patterns Along Core Design Axes](#-experiment-patterns-along-core-design-axes)
38
+ - [Training/Optimization Strategies](#1️⃣-trainingoptimization-strategies)
39
+ - [Problem Representation and Inductive Bias](#2️⃣-problem-representation-and-inductive-bias)
40
+ - [Scaling Experiments](#3️⃣-scaling-experiments)
41
+
42
+ 9. [Testing](#testing)
43
+ 10. [Shape Checking](#shape-checking)
44
+ 11. [Adding a New Model](#adding-a-new-model)
45
+ 12. [Adding a New Dataset](#adding-a-new-dataset)
46
+
47
+ ## Supported Systems
48
+
49
+ - **[1D Korteweg–de Vries (KdV) Equation](DATA_GUIDE.md#4d-1d-korteweg-de-vries)**
50
+ - **[2D Reaction Diffusion](DATA_GUIDE.md#4a-2d-reaction-diffusion)**
51
+ - **[2D Unforced Navier Stokes](DATA_GUIDE.md#4b-2d-navier-stokes-unforced)**
52
+ - **[2D Forced Navier Stokes](DATA_GUIDE.md#4c-2d-turbulent-flow-forced-navier-stokes)**
53
+ - **[2D Darcy Flow](DATA_GUIDE.md#4e-2d-darcy-flow)**
54
+
55
+ For detailed technical information on each PDE system — including governing equations, parameter ranges, and dataset download instructions — refer to the [Data Guide](DATA_GUIDE.md).
56
+
57
+ ## Supported Inverse Methods
58
+
59
+ - **Fully data-driven**
60
+ - **PDE Residual Loss**
61
+ - **Test-Time Tailoring (TTT)**
62
+
63
+ ## Models Implemented
64
+
65
+ - **[FNO (Fourier Neural Operator)](https://arxiv.org/pdf/2010.08895)**
66
+ - **[scOT (scalable Operator Transformer)](https://proceedings.neurips.cc/paper_files/paper/2024/file/84e1b1ec17bb11c57234e96433022a9a-Paper-Conference.pdf)**
67
+ - **[ResNet](https://arxiv.org/pdf/1512.03385)**
68
+
69
+ For detailed technical information on the model architecture, refer to the [Model Guide](MODEL_GUIDE.md).
70
+
71
+
72
+ ## Directory Structure
73
+
74
+ ```
75
+ PDEInvBench
76
+ ├── configs/ # Inverse problem Hydra configuration files
77
+ │ ├── callbacks/ # Training callbacks (checkpointing, logging)
78
+ │ ├── data/ # Dataset and data loading configurations
79
+ │ ├── lightning_module/ # PyTorch Lightning module configurations
80
+ │ ├── logging/ # Weights & Biases logging configurations
81
+ │ ├── loss/ # Loss function configurations
82
+ │ ├── lr_scheduler/ # Learning rate scheduler configurations
83
+ │ ├── model/ # Neural network model configurations
84
+ │ ├── optimizer/ # Optimizer configurations
85
+ | ├── system_params # PDE-specific model and experiment parameters
86
+ │ ├── tailoring_optimizer/ # Test-time tailoring optimizer configs
87
+ │ └── trainer/ # PyTorch Lightning trainer configurations
88
+ ├── scripts/ # Utility and data processing scripts
89
+ │ ├── darcy-flow-scripts/ # Darcy flow specific data processing
90
+ │ ├── parameter-perturb/ # Parameter perturbation utilities
91
+ │ ├── reaction-diffusion-scripts/ # Reaction-diffusion data processing
92
+ │ ├── data_splitter.py # Splits datasets into train/validation sets
93
+ │ └── process_navier_stokes.py # Processes raw Navier-Stokes data
94
+ ├── pdeinvbench/ # Main package source code
95
+ │ ├── data/ # Data loading and preprocessing modules
96
+ │ ├── lightning_modules/ # PyTorch Lightning training modules
97
+ │ ├── losses/ # Loss function implementations
98
+ │ ├── models/ # Neural network model implementations
99
+ │ │ ├── __init__.py # Package initialization
100
+ │ │ ├── conv_head.py # Convolutional head for parameter prediction
101
+ │ │ ├── downsampler.py # Spatial downsampling layers
102
+ │ │ ├── encoder.py # FNO and other encoder architectures
103
+ │ │ ├── inverse_model.py # Main inverse problem model
104
+ │ │ └── mlp.py # Multi-layer perceptron components
105
+ │ └── utils/ # Utility functions and type definitions
106
+ │ ├── __init__.py # Package initialization
107
+ │ ├── config_utils.py # Hydra configuration utilities
108
+ │ ├── types.py # Type definitions and PDE system constants
109
+ │ └── ... # Additional utility modules
110
+ └── train_inverse.py # Main training script for inverse problems
111
+ ```
112
+
113
+ ## Environment Setup
114
+
115
+ This project requires **Python 3.11** with PyTorch 2.7, PyTorch Lightning, and several scientific computing libraries.
116
+
117
+ ### Quick Setup (Recommended)
118
+
119
+ Using the provided `environment.yml`:
120
+
121
+ ```bash
122
+ # Create environment (use micromamba or conda)
123
+ conda env create -f environment.yml
124
+ conda activate inv-env-tmp
125
+
126
+ # Install the package in editable mode
127
+ pip install -e .
128
+ ```
129
+
130
+ ### Manual Setup
131
+
132
+ Alternatively, use the `build_env.sh` script:
133
+
134
+ ```bash
135
+ chmod +x build_env.sh
136
+ ./build_env.sh
137
+ ```
138
+
139
+ ### Key Dependencies
140
+
141
+ - **Deep Learning**: PyTorch 2.7, PyTorch Lightning 2.5
142
+ - **Neural Operators**: neuraloperator 0.3.0, scOT (Poseidon fork)
143
+ - **Scientific Computing**: scipy, numpy, h5py, torch-harmonics
144
+ - **Configuration**: Hydra 1.3, OmegaConf 2.3
145
+ - **Logging**: Weights & Biases (wandb)
146
+ - **Type Checking**: jaxtyping 0.3.2, typeguard 2.13.3
147
+
148
+ **Note**: The scOT architecture requires a custom fork installed from GitHub (automatically handled in setup scripts).
149
+
150
+ ### Verify Installation
151
+
152
+ ```bash
153
+ python -c "import torch; import lightning; import pdeinvbench; print('Setup successful!')"
154
+ ```
155
+
156
+ ## Downloading Data
157
+
158
+ We provide datasets on [HuggingFace](https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main) with a convenient download script. Use `huggingface_pdeinv_download.py` to batch download specific datasets and splits:
159
+
160
+ ```bash
161
+ pip install huggingface_hub
162
+ python3 huggingface_pdeinv_download.py --dataset darcy-flow-241 --split train --local-dir ./data
163
+ ```
164
+
165
+ Available datasets include `darcy-flow-241`, `korteweg-de-vries-1d`, `navier-stokes-forced-2d`, `reaction-diffusion-2d-du`, and more. For complete dataset documentation, parameter ranges, and detailed download instructions, see the [Data Guide](DATA_GUIDE.md#2-downloading-data).
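+
+ To fetch several splits of one dataset in a single pass, a small shell loop also works. This is a minimal sketch: it assumes the split names on the Hub follow the `train`/`validation`/`test` layout used by the configs, so check the dataset page if a split is missing.
+
+ ```bash
+ # Download the train, validation, and test splits of the Darcy flow dataset
+ for split in train validation test; do
+     python3 huggingface_pdeinv_download.py --dataset darcy-flow-241 --split "$split" --local-dir ./data
+ done
+ ```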
166
+
167
+ ## Running Experiments
168
+
169
+ We use `hydra` to manage experiment configurations. The repository supports all combinations of:
170
+ - **PDE Systems**: `1dkdv`, `2drdk`, `2drddu`, `2dns`, `2dtf`, `2ddf`
171
+ - **Problem Representation**:
172
+ - **Derivative conditioning**
173
+ - **Temporal conditioning**: conditioning on 2, 5, 10, 15, 20, or 25 past frames
174
+ - **Model architectures**: FNO, ResNet, scOT (scalable Operator Transformer)
175
+ - **Training / Optimization strategies**:
176
+ - **Fully data-driven supervision** — standard supervised training using paired parameter–solution data
177
+ - **Physics-informed (residual) training** — includes a PDE residual loss term for self-supervised regularization
178
+ - **Test-Time Tailoring (TTT)** — post-training fine-tuning using the PDE residual at inference time to adapt to new parameter regimes
179
+ - **Scaling**:
180
+ - **Model Scaling**: 500k parameters, 5 million parameters, 50 million parameters
181
+ - **Data scaling**: parameter, initial condition, temporal horizon
182
+ - **Resolution scaling**: 64×64, 128×128, 256×256, 512×512
183
+
184
+ ### How Configs Work
185
+
186
+ #### Base Configs
187
+
188
+ Base configs are located in `configs` and provide starting points for experiments:
189
+
190
+ - Top-level configs (e.g., `1dkdv.yaml`, `2drd.yaml`) combine specific options for datasets, models, and training settings
191
+ - Individual component configs are in subdirectories (e.g., `configs/data/`, `configs/model/`)
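+
+ To see what a top-level config actually resolves to before launching anything, Hydra's standard `--cfg` flag prints the composed job config and exits. The sketch below assumes `train_inverse.py` is a regular Hydra entry point (which the configs suggest) and uses `2dtf` only as an example; any config name works.
+
+ ```bash
+ # Print the composed job configuration without training
+ python3 train_inverse.py --config-name=2dtf --cfg job
+ ```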
192
+
193
+ #### Hydra Override Mechanism
194
+
195
+ Hydra allows you to override any configuration parameter via command line:
196
+
197
+ 1. **Simple parameter override**: `parameter=value`
198
+ 2. **Nested parameter override**: `group.parameter=value`
199
+ 3. **Adding new parameters**: `+new_parameter=value`
200
+
201
+ All overrides are automatically appended to the W&B experiment name for easy tracking.
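+
+ For example, all three override styles can be combined in a single run. The command below is purely illustrative; the particular values are arbitrary and every override used here is documented in the sections that follow.
+
+ ```bash
+ # Simple override (seed), nested overrides (data.*, model), and an added (+) parameter in one command
+ python3 train_inverse.py --config-name=2dtf seed=1 data.batch_size=4 model=resnet +test_run=true
+ ```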
202
+
203
+ ### Basic Commands
204
+
205
+ ```bash
206
+ # Run with a predefined config
207
+ python3 train_inverse.py --config-name={pde_system}
208
+
209
+ # Run in test mode (evaluation only)
210
+ python3 train_inverse.py --config-name={pde_system} +test_run=true
211
+
212
+ # Load a model from W&B
213
+ python3 train_inverse.py --config-name={pde_system} +inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version}
214
+ ```
215
+ Here `{pde_system}` is one of `1dkdv`, `2dtf`, `2dns`, `2drdk`, `2drddu`, or `2ddf`.
216
+ ### Common Overrides
217
+
218
+ #### Data-related Overrides
219
+ ```bash
220
+ # Specify data root directory
221
+ data.data_root=/path/to/data
222
+
223
+ # Control time window sampling
224
+ data.every_nth_window=10
225
+
226
+ # Control fraction of data used
227
+ data.frac_ics_per_param=0.5
228
+ data.frac_param_combinations=0.5
229
+
230
+ # Control train/test temporal split
231
+ data.train_window_end_percent=0.5
232
+ data.test_window_start_percent=0.76
233
+
234
+ # High-resolution data processing
235
+ high_resolution=True
236
+ data.downsample_factor=4 # Downsample from 512x512 to 128x128
237
+ data.downsample_factor=2 # Downsample from 512x512 to 256x256
238
+ data.batch_size=2 # Reduce batch size for higher resolution data
239
+ ```
240
+
241
+ #### Model-related Overrides
242
+ ```bash
243
+ # Choose a model
244
+ model=fno
245
+ model=scot
246
+ model=resnet
247
+ model=fno_50mil # Higher capacity model
248
+ model=fno_50k # Lower capacity (~500k parameter) model
249
+
250
+ # Configure model parameters
251
+ model.model_config.paramnet.encoder.use_partials=False
252
+
253
+ # Specify which parameters to predict (e.g., for ablation studies)
254
+ model.model_config.paramnet.params_to_predict=['Du']
255
+ model.model_config.paramnet.params_to_predict=['Dv']
256
+ ```
257
+
258
+ #### Training Overrides
259
+ ```bash
260
+ # Control distributed training
261
+ +trainer.num_nodes=2
262
+
263
+ # Set random seed
264
+ seed=0
265
+
266
+ # Load a pre-trained model
267
+ +inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version}
268
+
269
+ # Enable test-only mode (no training)
270
+ +test_run=true
271
+ ```
272
+
273
+ #### Loss-related Overrides
274
+ ```bash
275
+ # Adjust loss weights
276
+ loss.inverse_param_loss_weight=0
277
+ loss.inverse_residual_loss_weight=1
278
+ ```
279
+
280
+ #### Logging Overrides
281
+ ```bash
282
+ # Set W&B project and directory
283
+ logging.project=my_project
284
+ logging.save_dir=/path/to/wandb/cache
285
+ ```
286
+
287
+ ### Multi-GPU and Distributed Training
288
+
289
+ ```bash
290
+ # Single GPU
291
+ CUDA_VISIBLE_DEVICES=0 python3 train_inverse.py --config-name={pde_system}
292
+
293
+ # Multi-node with SLURM
294
+ srun python3 train_inverse.py --config-name={pde_system} +trainer.num_nodes={num_nodes}
295
+ # num_nodes = 1, 2, 4, etc.
296
+ ```
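+
+ As a sketch of how the SLURM launch is typically wrapped, the batch script below assumes 2 nodes with 4 GPUs each; the resource directives (GPU count, tasks per node, time limit) are placeholders that must be adapted to your cluster and are not prescribed by this repository.
+
+ ```bash
+ #!/bin/bash
+ #SBATCH --nodes=2
+ #SBATCH --ntasks-per-node=4   # one task per GPU
+ #SBATCH --gres=gpu:4
+ #SBATCH --time=24:00:00
+
+ srun python3 train_inverse.py --config-name=2dtf +trainer.num_nodes=2
+ ```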
297
+
298
+ ### 🧪 Experiment Patterns Along Core Design Axes
299
+
300
+ This section provides ready-to-run experiment templates organized by the core research dimensions of the benchmark. Each pattern includes concrete commands and parameter sweep recommendations.
301
+
302
+ ---
303
+
304
+ #### 1️⃣ Training/Optimization Strategies
305
+
306
+ Experiments exploring different supervision and optimization approaches for inverse problems.
307
+
308
+ ##### 1.1 Fully Data-Driven vs Physics-Informed Training
309
+
310
+ ```bash
311
+ # Fully data-driven (no residual loss)
312
+ python3 train_inverse.py --config-name={pde_system} \
313
+ loss.inverse_residual_loss_weight=0
314
+
315
+ # Physics-informed with varying residual weights
316
+ python3 train_inverse.py --config-name={pde_system} \
317
+ loss.inverse_residual_loss_weight={weight}
318
+ # Recommended sweep: weight = 1.0, 0.1, 0.01, 0.001, 0.0001
319
+ ```
320
+
321
+ This allows you to control the balance between data-driven supervision and physics-based regularization for parameter identification.
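+
+ A minimal sketch of the recommended sweep as a shell loop (shown here on the `2drdk` system; substitute any config name):
+
+ ```bash
+ # Sweep the PDE-residual loss weight over the recommended values
+ for w in 1.0 0.1 0.01 0.001 0.0001; do
+     python3 train_inverse.py --config-name=2drdk loss.inverse_residual_loss_weight=$w
+ done
+ ```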
322
+
323
+ ##### 1.2 Test-Time Tailoring (TTT)
324
+
325
+ At test time, given an observed PDE trajectory `u_{t-k}, ..., u_t`, the inverse model `f_θ` predicts parameters `φ̂ = f_θ(u_{t-k}, ..., u_t)`.
326
+ Test-Time Tailoring (TTT) adapts `f_θ` by minimizing a physics-based self-supervised loss derived from the PDE residual:
327
+
328
+ `L_Tailor = ||F_{φ̂}(u_{t-k}, ..., u_t)||² + α * ( ||f_θ(u_{t-k}, ..., u_t) - f_{θ_frozen}(u_{t-k}, ..., u_t)||² / ||f_{θ_frozen}(u_{t-k}, ..., u_t)||² )`
329
+
330
+ Here `F_{φ̂}` is a discrete approximation of the PDE operator, and `α` controls the strength of the *anchor loss* that stabilizes adaptation. Optimization is performed for a small number of gradient steps on `L_Tailor`, allowing the model to specialize to new or out-of-distribution parameter regimes at inference time.
331
+
332
+ ```bash
333
+ # Basic TTT with pre-trained model
334
+ python3 train_inverse.py --config-name={pde_system}_ttt \
335
+ inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version} \
336
+ tailor_anchor_loss_weight={alpha} \
337
+ num_tailoring_steps={steps} \
338
+ tailoring_optimizer_lr={lr}
339
+
340
+ ```
341
+
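+ As a concrete illustration, the command below uses the defaults from `configs/ttt_base.yaml` (anchor weight 1, 50 tailoring steps, learning rate 1e-5) on the KdV system; the W&B run path is a placeholder to replace with your own checkpoint.
+
+ ```bash
+ python3 train_inverse.py --config-name=1dkdv_ttt \
+     inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version} \
+     tailor_anchor_loss_weight=1 \
+     num_tailoring_steps=50 \
+     tailoring_optimizer_lr=0.00001
+ ```
+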
342
+ ---
343
+
344
+ #### 2️⃣ Problem Representation and Inductive Bias
345
+
346
+ Experiments exploring how to encode physical problems and what architectural inductive biases work best.
347
+
348
+ ##### 2.1 Conditioning Strategy: Derivatives vs Raw Solutions
349
+
350
+ ```bash
351
+ # Derivative conditioning (spatial/temporal derivatives as input)
352
+ python3 train_inverse.py --config-name={pde_system} \
353
+ model.model_config.paramnet.encoder.use_partials=True
354
+
355
+ # Temporal conditioning (raw solution snapshots only)
356
+ python3 train_inverse.py --config-name={pde_system} \
357
+ model.model_config.paramnet.encoder.use_partials=False
358
+ ```
359
+
360
+ Derivative conditioning provides explicit gradient information from the frames.
361
+
362
+ ##### 2.2 Model Architecture: Inductive Biases
363
+
364
+ ```bash
365
+ # Fourier Neural Operator (spectral bias)
366
+ python3 train_inverse.py --config-name={pde_system} model=fno
367
+
368
+ # ResNet (convolutional locality bias)
369
+ python3 train_inverse.py --config-name={pde_system} model=resnet
370
+
371
+ # scOT - Scalable Operator Transformer (attention-based)
372
+ python3 train_inverse.py --config-name={pde_system} model=scot
373
+ ```
374
+
375
+ ##### 2.3 Temporal Conditioning Frames
376
+
377
+ ```bash
378
+ # Vary the number of past frames used for conditioning
379
+ python3 train_inverse.py --config-name={pde_system} n_past={num_frames}
380
+
381
+ # Recommended sweep: num_frames = 2, 5, 10, 15, 20
382
+ ```
383
+
384
+
385
+ ---
386
+
387
+ #### 3️⃣ Scaling Experiments
388
+
389
+ Experiments investigating how performance scales with model capacity, data quantity, and spatial resolution.
390
+
391
+ ##### 3.1 Model Capacity Scaling
392
+
393
+ ```bash
394
+ # Small model: ~500k parameters
395
+ python3 train_inverse.py --config-name={pde_system} model=fno_50k
396
+
397
+ # Base model: ~5M parameters
398
+ python3 train_inverse.py --config-name={pde_system} model=fno
399
+
400
+ # Large model: ~50M parameters
401
+ python3 train_inverse.py --config-name={pde_system} model=fno_50mil
402
+ ```
403
+
404
+
405
+ ##### 3.2 Data Quantity Scaling
406
+
407
+ ###### 3.2.1 Initial Condition Diversity Scaling
408
+ ```bash
409
+ python3 train_inverse.py --config-name={pde_system} \
410
+ data.frac_ics_per_param={frac}
411
+ # Recommended sweep: frac = 0.2, 0.35, 0.5, 0.75
412
+ ```
413
+
414
+ Only a `frac_ics_per_param` fraction of the initial-condition trajectories per parameter combination is sampled during training, allowing you to control the amount of initial-condition diversity and study data efficiency across different initial states. A sketch of the recommended sweep follows.
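+
+ ```bash
+ # Sweep initial-condition diversity (2drdk used as an example config)
+ for frac in 0.2 0.35 0.5 0.75; do
+     python3 train_inverse.py --config-name=2drdk data.frac_ics_per_param=$frac
+ done
+ ```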
415
+
416
+ ###### 3.2.2 Parameter Space Coverage Scaling
417
+ ```bash
418
+ python3 train_inverse.py --config-name={pde_system} \
419
+ data.frac_param_combinations={frac}
420
+ # Recommended sweep: frac = 0.2, 0.35, 0.5, 0.75
421
+ ```
422
+
423
+ Only a `frac_param_combinations` fraction of the parameter combinations in the train set is sampled, allowing you to control parameter-space coverage and understand how model performance scales with the diversity of parameter combinations in the training data.
424
+
425
+ ###### 3.2.3 Temporal Horizon Scaling
426
+ ```bash
427
+ python3 train_inverse.py --config-name={pde_system} \
428
+ data.train_window_end_percent={train_end} \
429
+ data.test_window_start_percent={test_start}
430
+ # Recommended sweeps:
431
+ # train_end = 0.25, 0.5, 0.76, 1.0
432
+ # test_start = 0.76
433
+ ```
434
+
435
+ Only the first `train_window_end_percent` fraction of each trajectory is used for training, while evaluation uses the windows starting after `test_window_start_percent` of the trajectory. This enables studies of temporal extrapolation and of how much temporal dynamics is needed for accurate parameter identification; see the sweep sketch below.
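+
+ ```bash
+ # Temporal-horizon sweep: train on progressively longer prefixes, always evaluate on the final portion
+ for train_end in 0.25 0.5 0.76 1.0; do
+     python3 train_inverse.py --config-name=2dtf \
+         data.train_window_end_percent=$train_end \
+         data.test_window_start_percent=0.76
+ done
+ ```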
436
+
437
+ ##### 3.3 Spatial Resolution Scaling
438
+
439
+ ```bash
440
+ # High-resolution experiments with online downsampling
441
+ python3 train_inverse.py --config-name={pde_system} \
442
+ high_resolution=True \
443
+ data.downsample_factor={factor} \
444
+ data.batch_size={batch_size}
445
+
446
+ # Example configurations:
447
+ # factor=1, 512×512 (full resolution)
448
+ # factor=2, 256×256
449
+ # factor=4, 128×128
450
+ # factor=8, 64×64
451
+ ```
452
+
453
+ The `HIGH_RESOLUTION_PDE_SPATIAL_SIZE` in `pdeinvbench/utils/types.py` defines the maximum resolution (typically 512×512), and the downsampling factor reduces from this maximum. These experiments help determine how resolution affects identifiability of parameters and whether models trained on low-resolution data can generalize to high-resolution inputs.
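+
+ A resolution sweep can be scripted by looping over the downsampling factor while keeping the batch size small, as a rough sketch; the fixed `data.batch_size=2` is illustrative and can usually be raised at coarser resolutions depending on GPU memory.
+
+ ```bash
+ # Resolution scaling: factor 1 -> 512x512, 2 -> 256x256, 4 -> 128x128, 8 -> 64x64
+ for factor in 1 2 4 8; do
+     python3 train_inverse.py --config-name=2dtf \
+         high_resolution=True \
+         data.downsample_factor=$factor \
+         data.batch_size=2
+ done
+ ```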
454
+
455
+ ## Testing
456
+
457
+ The `tests/` directory contains validation scripts to verify the correctness of PDE residual computations and numerical implementations.
458
+
459
+ ### Test Structure
460
+
461
+ - **`test_fluids.py`**: Validates turbulent flow and Navier-Stokes residual computations by comparing PyTorch implementations against NumPy reference implementations
462
+ - **`fluids_numpy_reference.py`**: NumPy reference implementations for fluid dynamics operators (stream function, advection, Laplacian)
463
+ - **`reaction-diffusion-residuals.py`**: Validates reaction-diffusion residual computations and generates visualization GIFs
464
+
465
+ ### Running Tests
466
+
467
+ **Standard pytest (skips tests requiring external data):**
468
+ ```bash
469
+ pytest tests/ -v
470
+ ```
471
+
472
+ ### Test Validation
473
+
474
+ The validation tests verify:
475
+ 1. **Numerical accuracy**: Finite difference operators match reference implementations (error < 1e-3)
476
+ 2. **PDE residuals**: Ground-truth solutions produce near-zero residuals (typically < 1e-4)
477
+ 3. **Operator correctness**: Stream function, advection, Laplacian, and gradient computations
478
+ 4. **Batch independence**: No cross-contamination between batch elements
479
+
480
+ ### Data Requirements
481
+
482
+ Some tests require external HDF5 datasets:
483
+ - Tests automatically **skip** (not fail) when data is unavailable
484
+ - Suitable for CI/CD environments without large datasets
485
+ - For full validation, download datasets following the [Data Guide](DATA_GUIDE.md)
486
+
487
+ ### Visualization
488
+
489
+ Residual validation scripts generate animated GIFs in `test-images/` showing:
490
+ - Temporal evolution of PDE residuals
491
+ - Spatial distribution of numerical errors
492
+ - Threshold-based error highlighting
493
+
494
+
495
+ ## Shape-Checking
496
+
497
+ This codebase uses [jaxtyping](https://github.com/google/jaxtyping) for runtime tensor shape validation, which helps catch dimension mismatches.
498
+
499
+ **To disable shape checking for faster execution:**
500
+ ```bash
501
+ # Disable for production runs
502
+ export JAXTYPING_DISABLE=1
503
+ python train_inverse.py --config-name=2dtf model=fno
504
+
505
+ # Or inline
506
+ JAXTYPING_DISABLE=1 python train_inverse.py --config-name=2dtf model=fno
507
+ ```
508
+
509
+ ## Adding a New Dataset
510
+
511
+ To add a new PDE system to the benchmark, follow the guide in [Data Guide - Section 5: Adding a New Dataset](DATA_GUIDE.md#5-adding-a-new-dataset).
512
+
513
+ ## Adding a New Model
514
+
515
+ To add a new encoder architecture (e.g., Transformer, U-Net), follow the guide in [Model Guide - Adding a New Model](MODEL_GUIDE.md#adding-a-new-model).
configs/1dkdv.yaml ADDED
@@ -0,0 +1,5 @@
1
+ name: 1dkdv
2
+ defaults:
3
+ - _self_
4
+ - base
5
+ - override system_params: 1dkdv
configs/1dkdv_ttt.yaml ADDED
@@ -0,0 +1,8 @@
1
+ name: 1dkdv
2
+ defaults:
3
+ - _self_
4
+ - ttt_base
5
+ - override system_params: 1dkdv
6
+
7
+ inverse_model_wandb_run: ml-pdes/1dkdv_test_time_tuning/model-4j475b9v:v199
8
+ # inverse_model_wandb_run: ml-pdes/time_logging_test/model-tw4k8e8h:best
configs/2ddf.yaml ADDED
@@ -0,0 +1,8 @@
1
+ name: 2ddf
2
+ defaults:
3
+ - base
4
+ - _self_
5
+ - override callbacks: 2ddf
6
+ - override system_params: 2ddf
7
+
8
+ n_past: 1
configs/2ddf_ttt.yaml ADDED
@@ -0,0 +1,9 @@
1
+ name: 2ddf
2
+ defaults:
3
+ - ttt_base # Load base first
4
+ - _self_ # Then override with this file's values
5
+ - override callbacks: 2ddf
6
+ - override system_params: 2ddf
7
+
8
+ inverse_model_wandb_run: ml-pdes/2ddf_compilation_folded/model-r5fj8hr1:best
9
+ n_past: 1
configs/2dns.yaml ADDED
@@ -0,0 +1,5 @@
1
+ name: 2dns
2
+ defaults:
3
+ - _self_
4
+ - base
5
+ - override system_params: 2dns
configs/2dns_ttt.yaml ADDED
@@ -0,0 +1,10 @@
1
+ name: 2dns
2
+ defaults:
3
+ - _self_
4
+ - ttt_base
5
+ - override system_params: 2dns
6
+
7
+
8
+ test_run: true
9
+ inverse_model_wandb_run: ml-pdes/tailoring_redone/model-wuhbdlqr:v200
10
+ # inverse_model_wandb_run: ml-pdes/time_logging_test/model-8mwjk5v0:best
configs/2drddu.yaml ADDED
@@ -0,0 +1,7 @@
1
+ name: 2drd
2
+ defaults:
3
+ - _self_
4
+ - base
5
+ - override system_params: 2drddu
6
+
7
+ # Note: params_to_predict is already set to ["Du"] in system_params/2drddu.yaml
configs/2drddu_ttt.yaml ADDED
@@ -0,0 +1,12 @@
1
+ name: 2drd
2
+ defaults:
3
+ - _self_
4
+ - ttt_base
5
+ - override system_params: 2drddu
6
+
7
+
8
+ test_run: true
9
+ inverse_model_wandb_run: ml-pdes/2drddu_compilation/model-lslyzo92:v184 # 100 % ics
10
+ # inverse_model_wandb_run: ml-pdes/2drddu_compilation/model-jupsos6p:best # 20 % ics
11
+ # inverse_model_wandb_run: ml-pdes/time_logging_test/model-71xuth62:best
12
+
configs/2drdk.yaml ADDED
@@ -0,0 +1,6 @@
1
+ name: 2drd
2
+ defaults:
3
+ - _self_
4
+ - base
5
+ - override system_params: 2drdk
6
+
configs/2drdk_ttt.yaml ADDED
@@ -0,0 +1,10 @@
1
+ name: 2drd
2
+ defaults:
3
+ - _self_
4
+ - ttt_base
5
+ - override system_params: 2drdk
6
+
7
+
8
+ test_run: true
9
+ inverse_model_wandb_run: ml-pdes/2drdk_compilation/model-30801ssy:v189
10
+ # inverse_model_wandb_run: ml-pdes/time_logging_test/model-o2v1e8oa:best
configs/2dtf.yaml ADDED
@@ -0,0 +1,6 @@
1
+ name: 2dtf
2
+ defaults:
3
+ - _self_
4
+ - base
5
+ - override system_params: 2dtf
6
+
configs/2dtf_ttt.yaml ADDED
@@ -0,0 +1,11 @@
1
+ name: 2dtf
2
+ defaults:
3
+ - _self_
4
+ - ttt_base
5
+ - override system_params: 2dtf
6
+
7
+
8
+ test_run: true
9
+ # inverse_model_wandb_run: ml-pdes/2dtf_compilation/model-kjskfseu:v172
10
+ inverse_model_wandb_run: ml-pdes/tailoring_redone/model-h6cc91c4:v182
11
+ # inverse_model_wandb_run: ml-pdes/time_logging_test/model-irns4x30:best
configs/base.yaml ADDED
@@ -0,0 +1,19 @@
1
+ name: base
2
+ defaults:
3
+ - _self_
4
+ - callbacks: base
5
+ - model: fno
6
+ - lightning_module: base
7
+ - logging: base
8
+ - loss: relative
9
+ - optimizer: adam
10
+ - trainer: trainer
11
+ - lr_scheduler: cosine
12
+ - system_params: Null
13
+ - data: base
14
+
15
+ n_past: 2
16
+ n_future: -1 #doesn't matter for inverse problems
17
+ mode: "inverse"
18
+ seed: 0
19
+ high_resolution: false
configs/callbacks/2ddf.yaml ADDED
@@ -0,0 +1,9 @@
1
+ - _target_: lightning.pytorch.callbacks.ModelCheckpoint # save model checkpoints
2
+ monitor: validation/loss
3
+ mode: min
4
+ save_last: True
5
+ - _target_: lightning.pytorch.callbacks.LearningRateMonitor # log learning rate
6
+ logging_interval: epoch
7
+ - _target_: pdeinvbench.lightning_modules.logging_callbacks.InverseErrorByTailoringStepCallback # log error by tailoring step
8
+ - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorTestTimeTailoringCallback
9
+ - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorPlottingCallback # stratify error by PDE parameter
configs/callbacks/base.yaml ADDED
@@ -0,0 +1,10 @@
1
+ - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorPlottingCallback # stratify error by PDE parameter
2
+ - _target_: lightning.pytorch.callbacks.ModelCheckpoint # save model checkpoints
3
+ monitor: validation/loss
4
+ mode: min
5
+ save_last: True
6
+ save_top_k: 1
7
+ - _target_: lightning.pytorch.callbacks.LearningRateMonitor # log learning rate
8
+ logging_interval: epoch
9
+ - _target_: pdeinvbench.lightning_modules.logging_callbacks.InverseErrorByTailoringStepCallback # log error by tailoring step
10
+ - _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorTestTimeTailoringCallback
configs/data/base.yaml ADDED
@@ -0,0 +1,112 @@
1
+ # These will be overridden by child configs
2
+ name: "placeholder_inverse"
3
+ data_root: "placeholder_path"
4
+ train_data_root: ${system_params.train_data_root}
5
+ val_data_root: ${system_params.val_data_root}
6
+ ood_data_root: ${system_params.ood_data_root}
7
+ ood_data_root_extreme: ${system_params.ood_data_root_extreme}
8
+ test_data_root: ${system_params.test_data_root}
9
+ num_channels: ${system_params.num_channels}
10
+ batch_size: 8
11
+ dilation: 1
12
+ cutoff_first_n_frames: ${system_params.cutoff_first_n_frames}
13
+ frac_param_combinations: 1
14
+ frac_ics_per_param: 1
15
+ random_sample_param: True
16
+ downsample_factor: 0
17
+ every_nth_window: 10
18
+ train_window_start_percent: 0
19
+ train_window_end_percent: 1
20
+ test_window_start_percent: 0
21
+ test_window_end_percent: 1
22
+
23
+ pde:
24
+ _target_: pdeinvbench.utils.types.PDE
25
+ value: ${system_params.pde_name}
26
+
27
+ train_dataloader:
28
+ _target_: torch.utils.data.DataLoader
29
+ dataset:
30
+ _target_: pdeinvbench.data.PDE_MultiParam
31
+ data_root: ${data.train_data_root}
32
+ pde: ${data.pde}
33
+ n_past: ${n_past}
34
+ train: True
35
+ dilation: ${data.dilation}
36
+ cutoff_first_n_frames: ${data.cutoff_first_n_frames}
37
+ frac_param_combinations: ${data.frac_param_combinations}
38
+ frac_ics_per_param: ${data.frac_ics_per_param}
39
+ random_sample_param: ${data.random_sample_param}
40
+ downsample_factor: ${data.downsample_factor}
41
+ every_nth_window: ${data.every_nth_window}
42
+ window_start_percent: ${data.train_window_start_percent}
43
+ window_end_percent: ${data.train_window_end_percent}
44
+ batch_size: ${data.batch_size}
45
+ shuffle: True
46
+
47
+ val_dataloader:
48
+ _target_: torch.utils.data.DataLoader
49
+ dataset:
50
+ _target_: pdeinvbench.data.PDE_MultiParam
51
+ data_root: ${data.val_data_root}
52
+ pde: ${data.pde}
53
+ n_past: ${n_past}
54
+ train: False
55
+ dilation: ${data.dilation}
56
+ cutoff_first_n_frames: ${data.cutoff_first_n_frames}
57
+ frac_param_combinations: ${data.frac_param_combinations}
58
+ frac_ics_per_param: ${data.frac_ics_per_param}
59
+ random_sample_param: ${data.random_sample_param}
60
+ downsample_factor: ${data.downsample_factor}
61
+ every_nth_window: ${data.every_nth_window}
62
+ window_start_percent: ${data.train_window_start_percent}
63
+ window_end_percent: ${data.train_window_end_percent}
64
+ batch_size: ${data.batch_size}
65
+ shuffle: False
66
+
67
+ ood_dataloader:
68
+ _target_: torch.utils.data.DataLoader
69
+ dataset:
70
+ _target_: pdeinvbench.data.PDE_MultiParam
71
+ data_root: ${data.ood_data_root}
72
+ pde: ${data.pde}
73
+ n_past: ${n_past}
74
+ train: False
75
+ dilation: ${data.dilation}
76
+ cutoff_first_n_frames: ${data.cutoff_first_n_frames}
77
+ downsample_factor: ${data.downsample_factor}
78
+ every_nth_window: ${data.every_nth_window}
79
+ batch_size: ${data.batch_size}
80
+ shuffle: False
81
+
82
+ ood_dataloader_extreme:
83
+ _target_: torch.utils.data.DataLoader
84
+ dataset:
85
+ _target_: pdeinvbench.data.PDE_MultiParam
86
+ data_root: ${data.ood_data_root_extreme}
87
+ pde: ${data.pde}
88
+ n_past: ${n_past}
89
+ train: False
90
+ dilation: ${data.dilation}
91
+ cutoff_first_n_frames: ${data.cutoff_first_n_frames}
92
+ downsample_factor: ${data.downsample_factor}
93
+ every_nth_window: ${data.every_nth_window}
94
+ batch_size: ${data.batch_size}
95
+ shuffle: False
96
+
97
+ test_dataloader:
98
+ _target_: torch.utils.data.DataLoader
99
+ dataset:
100
+ _target_: pdeinvbench.data.PDE_MultiParam
101
+ data_root: ${data.test_data_root}
102
+ pde: ${data.pde}
103
+ n_past: ${n_past}
104
+ train: False
105
+ dilation: ${data.dilation}
106
+ cutoff_first_n_frames: ${data.cutoff_first_n_frames}
107
+ downsample_factor: ${data.downsample_factor}
108
+ every_nth_window: ${data.every_nth_window}
109
+ window_start_percent: ${data.test_window_start_percent}
110
+ window_end_percent: ${data.test_window_end_percent}
111
+ batch_size: ${data.batch_size}
112
+ shuffle: False
configs/lightning_module/base.yaml ADDED
@@ -0,0 +1,10 @@
1
+ _target_: pdeinvbench.lightning_modules.InverseModule
2
+ pde: ${data.pde}
3
+ n_past: ${n_past}
4
+ batch_size: ${data.batch_size}
5
+ use_partials: ${model.model_config.paramnet.encoder.use_partials}
6
+ params_to_predict: ${model.model_config.paramnet.params_to_predict}
7
+ param_loss_metric: ${loss.param_loss_metric}
8
+ inverse_residual_loss_weight: ${loss.inverse_residual_loss_weight}
9
+ inverse_param_loss_weight: ${loss.inverse_param_loss_weight}
10
+ residual_filter: False
configs/lightning_module/ttt.yaml ADDED
@@ -0,0 +1,8 @@
1
+ defaults:
2
+ - base
3
+
4
+ _target_: pdeinvbench.lightning_modules.InverseTestTimeTailoringModule
5
+ num_tailoring_steps: ${num_tailoring_steps}
6
+ tailor_per_batch: ${tailor_per_batch}
7
+ tailor_anchor_loss_weight: ${tailor_anchor_loss_weight}
8
+ tailor_residual_loss_weight: ${tailor_residual_loss_weight}
configs/logging/base.yaml ADDED
@@ -0,0 +1,4 @@
1
+ _target_: pdeinvbench.utils.logging_utils.CustomWandbLogger
2
+ entity: "ml-pdes"
3
+ save_dir: "logs"
4
+ project: ${data.name}
configs/loss/mse.yaml ADDED
@@ -0,0 +1,5 @@
1
+ param_loss_metric:
2
+ _target_: pdeinvbench.utils.types.ParamMetrics
3
+ value: "Mean Squared Error"
4
+ inverse_residual_loss_weight: 0
5
+ inverse_param_loss_weight: 1
configs/loss/relative.yaml ADDED
@@ -0,0 +1,5 @@
1
+ param_loss_metric:
2
+ _target_: pdeinvbench.utils.types.ParamMetrics
3
+ value: "Relative Error"
4
+ inverse_residual_loss_weight: 0
5
+ inverse_param_loss_weight: 1
configs/lr_scheduler/cosine.yaml ADDED
@@ -0,0 +1,2 @@
1
+ _target_: torch.optim.lr_scheduler.CosineAnnealingLR
2
+ T_max: ${trainer.max_epochs}
configs/model/fno.yaml ADDED
@@ -0,0 +1,36 @@
1
+ # Shared FNO model configuration
2
+ # Interpolates ALL parameters from system_params
3
+ name: "${system_params.name}_fno"
4
+ dropout: ${system_params.fno_dropout}
5
+ hidden_channels: ${system_params.fno_hidden_channels}
6
+ encoder_layers: ${system_params.fno_encoder_layers}
7
+ downsampler_layers: ${system_params.fno_downsampler_layers}
8
+ mlp_layers: ${system_params.fno_mlp_layers}
9
+
10
+ model_config:
11
+ _target_: pdeinvbench.models.inverse_model.InverseModel
12
+ paramnet:
13
+ _target_: pdeinvbench.models.inverse_model.ParameterNet
14
+ pde: ${data.pde}
15
+ normalize: ${system_params.normalize}
16
+ logspace: ${system_params.logspace}
17
+ params_to_predict: ${system_params.params_to_predict}
18
+ mlp_type: ${system_params.mlp_type}
19
+ encoder:
20
+ _target_: pdeinvbench.models.encoder.FNOEncoder
21
+ n_modes: ${system_params.fno_n_modes}
22
+ n_past: ${n_past}
23
+ n_future: ${n_future}
24
+ n_layers: ${model.encoder_layers}
25
+ data_channels: ${data.num_channels}
26
+ hidden_channels: ${model.hidden_channels}
27
+ use_partials: True
28
+ pde: ${data.pde}
29
+ batch_size: ${data.batch_size}
30
+ downsampler: ${system_params.fno_downsampler}
31
+ mlp_hidden_size: ${model.hidden_channels}
32
+ mlp_layers: ${model.mlp_layers}
33
+ mlp_activation: "relu"
34
+ mlp_dropout: ${model.dropout}
35
+ downsample_factor: ${data.downsample_factor}
36
+
configs/model/fno_50k.yaml ADDED
@@ -0,0 +1,9 @@
1
+ # Small FNO variant (500k params)
2
+ # Inherits structure from fno.yaml, only overrides size parameters
3
+ defaults:
4
+ - fno
5
+
6
+ name: "${system_params.name}_fno_50k"
7
+ hidden_channels: ${system_params.fno_hidden_channels_50k}
8
+ encoder_layers: ${system_params.fno_encoder_layers_50k}
9
+
configs/model/fno_50mil.yaml ADDED
@@ -0,0 +1,9 @@
1
+ # Large FNO variant (50 million params)
2
+ # Inherits structure from fno.yaml, only overrides size parameters
3
+ defaults:
4
+ - fno
5
+
6
+ name: "${system_params.name}_fno_50mil"
7
+ hidden_channels: ${system_params.fno_hidden_channels_50mil}
8
+ encoder_layers: ${system_params.fno_encoder_layers_50mil}
9
+
configs/model/resnet.yaml ADDED
@@ -0,0 +1,35 @@
1
+ # Shared ResNet model configuration
2
+ # Interpolates ALL parameters from system_params
3
+ name: "${system_params.name}_resnet"
4
+ dropout: ${system_params.resnet_dropout}
5
+ hidden_channels: ${system_params.resnet_hidden_channels}
6
+ encoder_layers: ${system_params.resnet_encoder_layers}
7
+ downsampler_layers: ${system_params.resnet_downsampler_layers}
8
+ mlp_layers: ${system_params.resnet_mlp_layers}
9
+
10
+ model_config:
11
+ _target_: pdeinvbench.models.inverse_model.InverseModel
12
+ paramnet:
13
+ _target_: pdeinvbench.models.inverse_model.ParameterNet
14
+ pde: ${data.pde}
15
+ normalize: ${system_params.normalize}
16
+ logspace: ${system_params.logspace}
17
+ params_to_predict: ${system_params.params_to_predict}
18
+ mlp_type: ${system_params.mlp_type}
19
+ encoder:
20
+ _target_: pdeinvbench.models.encoder.ResnetEncoder
21
+ n_past: ${n_past}
22
+ n_future: ${n_future}
23
+ n_layers: ${model.encoder_layers}
24
+ data_channels: ${data.num_channels}
25
+ hidden_channels: ${model.hidden_channels}
26
+ use_partials: True
27
+ pde: ${data.pde}
28
+ batch_size: ${data.batch_size}
29
+ downsampler: ${system_params.resnet_downsampler}
30
+ mlp_hidden_size: ${model.hidden_channels}
31
+ mlp_layers: ${model.mlp_layers}
32
+ mlp_activation: "relu"
33
+ mlp_dropout: ${model.dropout}
34
+ downsample_factor: ${data.downsample_factor}
35
+
configs/model/scot.yaml ADDED
@@ -0,0 +1,41 @@
1
+ # Shared ScOT model configuration
2
+ # Interpolates ALL parameters from system_params
3
+ name: "${system_params.name}_scot"
4
+ dropout: ${system_params.scot_dropout}
5
+ hidden_channels: ${system_params.scot_hidden_channels}
6
+ encoder_layers: ${system_params.scot_encoder_layers}
7
+ downsampler_layers: ${system_params.scot_downsampler_layers}
8
+ mlp_layers: ${system_params.scot_mlp_layers}
9
+
10
+ model_config:
11
+ _target_: pdeinvbench.models.inverse_model.InverseModel
12
+ paramnet:
13
+ _target_: pdeinvbench.models.inverse_model.ParameterNet
14
+ pde: ${data.pde}
15
+ normalize: ${system_params.normalize}
16
+ logspace: ${system_params.logspace}
17
+ params_to_predict: ${system_params.params_to_predict}
18
+ mlp_type: ${system_params.mlp_type}
19
+ encoder:
20
+ _target_: pdeinvbench.models.encoder.ScOTEncoder
21
+ embed_dim: ${system_params.scot_embed_dim}
22
+ n_layers: ${model.encoder_layers}
23
+ hidden_size: ${system_params.scot_hidden_size}
24
+ patch_size: ${system_params.scot_patch_size}
25
+ num_heads: ${system_params.scot_num_heads}
26
+ skip_connections: ${system_params.scot_skip_connections}
27
+ depths: ${system_params.scot_depths}
28
+ n_past: ${n_past}
29
+ n_future: ${n_future}
30
+ use_partials: True
31
+ data_channels: ${data.num_channels}
32
+ pde: ${data.pde}
33
+ batch_size: ${data.batch_size}
34
+ downsampler: ${system_params.scot_downsampler}
35
+ mlp_hidden_size: ${system_params.scot_mlp_hidden_size}
36
+ mlp_layers: ${model.mlp_layers}
37
+ mlp_activation: "relu"
38
+ mlp_dropout: ${model.dropout}
39
+ condition_on_time: ${system_params.scot_condition_on_time}
40
+ downsample_factor: ${data.downsample_factor}
41
+
configs/optimizer/adam.yaml ADDED
@@ -0,0 +1,2 @@
1
+ _target_: torch.optim.Adam
2
+ lr: 0.0001
configs/system_params/1dkdv.yaml ADDED
@@ -0,0 +1,17 @@
1
+ # ============================================
2
+ # 1DKDV SYSTEM PARAMETERS
3
+ # ============================================
4
+ defaults:
5
+ - base
6
+
7
+ # ============ Data Parameters ============
8
+ name: "1dkdv_inverse"
9
+ data_root: "/data/shared/meta-pde/folded_data/kdv/fold_2"
10
+ pde_name: "Korteweg-de Vries 1D"
11
+ num_channels: 1
12
+ cutoff_first_n_frames: 0
13
+
14
+ # ============ Model Parameters ============
15
+ downsampler_input_dim: 1 # 1D system
16
+ params_to_predict: ["delta"]
17
+ normalize: True
configs/system_params/2ddf.yaml ADDED
@@ -0,0 +1,27 @@
1
+ # ============================================
2
+ # 2DDF SYSTEM PARAMETERS
3
+ # ============================================
4
+ defaults:
5
+ - base
6
+
7
+ # ============ Data Parameters ============
8
+ name: "2ddf_inverse"
9
+ data_root: "/data/shared/meta-pde/darcy-flow/r241_folded/"
10
+ pde_name: "Darcy Flow 2D"
11
+ num_channels: 1
12
+ cutoff_first_n_frames: 0
13
+
14
+ # ============ Model Parameters ============
15
+ params_to_predict: ["coeff"]
16
+ normalize: False
17
+ mlp_type: "conv" # Special: 2ddf uses conv MLP
18
+
19
+ # Override downsamplers: 2ddf uses IdentityMap instead of ConvDownsampler
20
+ fno_downsampler:
21
+ _target_: pdeinvbench.models.downsampler.IdentityMap
22
+
23
+ resnet_downsampler:
24
+ _target_: pdeinvbench.models.downsampler.IdentityMap
25
+
26
+ scot_downsampler:
27
+ _target_: pdeinvbench.models.downsampler.IdentityMap
configs/system_params/2dns.yaml ADDED
@@ -0,0 +1,16 @@
1
+ # ============================================
2
+ # 2DNS SYSTEM PARAMETERS
3
+ # ============================================
4
+ defaults:
5
+ - base
6
+
7
+ # ============ Data Parameters ============
8
+ name: "2dns_inverse"
9
+ data_root: "/data/divyam123/meta-pde/sampled_parameters_split/navierstokes64"
10
+ pde_name: "Navier Stokes 2D"
11
+ num_channels: 1
12
+ cutoff_first_n_frames: 0
13
+
14
+ # ============ Model Parameters ============
15
+ params_to_predict: ["re"]
16
+ normalize: False
configs/system_params/2drddu.yaml ADDED
@@ -0,0 +1,9 @@
1
+ # ============================================
2
+ # 2DRD-DU SYSTEM PARAMETERS
3
+ # ============================================
4
+ defaults:
5
+ - 2drdk
6
+ data_root: "/data/shared/meta-pde/folded_data/reaction-diffusion-2d/Du_fold_2"
7
+
8
+ # ============ Model Parameters ============
9
+ params_to_predict: ["Du"]
configs/system_params/2drdk.yaml ADDED
@@ -0,0 +1,18 @@
1
+ # ============================================
2
+ # 2DRD-K SYSTEM PARAMETERS
3
+ # ============================================
4
+ defaults:
5
+ - base
6
+
7
+ # ============ Data Parameters ============
8
+ name: "2drdk_inverse"
9
+ data_root: "/data/shared/meta-pde/folded_data/reaction-diffusion-2d/k_fold_2"
10
+ pde_name: "Reaction Diffusion 2D"
11
+ num_channels: 2
12
+ cutoff_first_n_frames: 2
13
+ # Special override for corner extreme OOD
14
+ ood_data_root_extreme: ${system_params.data_root}/out_of_distribution_corner_extreme
15
+
16
+ # ============ Model Parameters ============
17
+ params_to_predict: ["k"]
18
+ normalize: False
configs/system_params/2dtf.yaml ADDED
@@ -0,0 +1,16 @@
1
+ # ============================================
2
+ # 2DTF SYSTEM PARAMETERS
3
+ # ============================================
4
+ defaults:
5
+ - base
6
+
7
+ # ============ Data Parameters ============
8
+ name: "2dtf_inverse"
9
+ data_root: "/data/shared/meta-pde/folded_data/turbulent-flow-2d/fold_2"
10
+ pde_name: "Turbulent Flow 2D"
11
+ num_channels: 1
12
+ cutoff_first_n_frames: 0
13
+
14
+ # ============ Model Parameters ============
15
+ params_to_predict: ["nu"]
16
+ normalize: True
configs/system_params/base.yaml ADDED
@@ -0,0 +1,91 @@
1
+ # Base system parameters
2
+ # Defines common structure and defaults for BOTH data AND model
3
+ # Each system inherits this and overrides specific values
4
+
5
+ # ============ Data Parameters ============
6
+ name: "placeholder_inverse"
7
+ data_root: "placeholder_path"
8
+ train_data_root: ${system_params.data_root}/train
9
+ val_data_root: ${system_params.data_root}/validation
10
+ ood_data_root: ${system_params.data_root}/out_of_distribution
11
+ ood_data_root_extreme: ${system_params.data_root}/out_of_distribution_extreme
12
+ test_data_root: ${system_params.data_root}/test
13
+ pde_name: "placeholder_pde"
14
+ num_channels: 1
15
+ cutoff_first_n_frames: 0
16
+
17
+ # ============ Model - System-Specific Parameters ============
18
+ params_to_predict: []
19
+ normalize: False
20
+ logspace: False
21
+ mlp_type: "mlp" # Default to standard MLP (2ddf overrides to "conv")
22
+ downsampler_input_dim: 2 # 1 for 1D systems, 2 for 2D systems
23
+
24
+ # ============ FNO Architecture ============
25
+ fno_hidden_channels: 64
26
+ fno_encoder_layers: 4
27
+ fno_downsampler_layers: 4
28
+ fno_dropout: 0
29
+ fno_mlp_layers: 1
30
+ fno_n_modes: 16
31
+
32
+ fno_hidden_channels_50k: 16
33
+ fno_encoder_layers_50k: 6
34
+
35
+ fno_hidden_channels_50mil: 200
36
+ fno_encoder_layers_50mil: 4
37
+
38
+ fno_downsampler:
39
+ _target_: pdeinvbench.models.downsampler.ConvDownsampler
40
+ input_dimension: ${system_params.downsampler_input_dim}
41
+ n_layers: ${model.downsampler_layers}
42
+ in_channels: ${model.hidden_channels}
43
+ out_channels: ${model.hidden_channels}
44
+ kernel_size: 3
45
+ stride: 1
46
+ padding: 2
47
+ dropout: ${model.dropout}
48
+
49
+ # ============ ResNet Architecture ============
50
+ resnet_hidden_channels: 128
51
+ resnet_encoder_layers: 13
52
+ resnet_downsampler_layers: 4
53
+ resnet_dropout: 0
54
+ resnet_mlp_layers: 1
55
+
56
+ resnet_downsampler:
57
+ _target_: pdeinvbench.models.downsampler.ConvDownsampler
58
+ input_dimension: ${system_params.downsampler_input_dim}
59
+ n_layers: ${model.downsampler_layers}
60
+ in_channels: ${model.hidden_channels}
61
+ out_channels: ${model.hidden_channels}
62
+ kernel_size: 3
63
+ stride: 1
64
+ padding: 2
65
+ dropout: ${model.dropout}
66
+
67
+ # ============ ScOT Architecture ============
68
+ scot_hidden_channels: 32
69
+ scot_encoder_layers: 4
70
+ scot_downsampler_layers: 4
71
+ scot_dropout: 0
72
+ scot_mlp_layers: 1
73
+ scot_mlp_hidden_size: 32
74
+ scot_condition_on_time: False
75
+ scot_embed_dim: 36
76
+ scot_hidden_size: 32
77
+ scot_patch_size: 4
78
+ scot_num_heads: [3, 6, 12, 24]
79
+ scot_skip_connections: [2, 2, 2, 2]
80
+ scot_depths: [1, 1, 1, 1]
81
+
82
+ scot_downsampler:
83
+ _target_: pdeinvbench.models.downsampler.ConvDownsampler
84
+ input_dimension: ${system_params.downsampler_input_dim}
85
+ n_layers: ${model.downsampler_layers}
86
+ in_channels: ${model.hidden_channels}
87
+ out_channels: ${model.hidden_channels}
88
+ kernel_size: 3
89
+ stride: 1
90
+ padding: 2
91
+ dropout: ${model.dropout}
configs/tailoring_optimizer/adam.yaml ADDED
@@ -0,0 +1,2 @@
1
+ _target_: torch.optim.Adam
2
+ lr: ${tailoring_optimizer_lr}
configs/tailoring_optimizer/sgd.yaml ADDED
@@ -0,0 +1,2 @@
1
+ _target_: torch.optim.SGD
2
+ lr: ${tailoring_optimizer_lr}
configs/trainer/trainer.yaml ADDED
@@ -0,0 +1,4 @@
1
+ _target_: lightning.Trainer
2
+ max_epochs: 200
3
+ log_every_n_steps: 10
4
+ callbacks: ${callbacks}
configs/ttt_base.yaml ADDED
@@ -0,0 +1,14 @@
1
+ name: ttt_base
2
+ defaults:
3
+ - _self_
4
+ - base
5
+ - tailoring_optimizer: adam
6
+ - override lightning_module: ttt
7
+
8
+ test_run: true
9
+
10
+ tailor_anchor_loss_weight: 1
11
+ tailor_residual_loss_weight: 1
12
+ tailor_per_batch: True
13
+ num_tailoring_steps: 50
14
+ tailoring_optimizer_lr: 0.00001
environment.yml ADDED
@@ -0,0 +1,144 @@
1
+ name: inv-env-tmp
2
+ channels:
3
+ - defaults
4
+ - conda-forge
5
+ dependencies:
6
+ - _libgcc_mutex=0.1=main
7
+ - _openmp_mutex=5.1=1_gnu
8
+ - bzip2=1.0.8=h5eee18b_6
9
+ - ca-certificates=2025.2.25=h06a4308_0
10
+ - ld_impl_linux-64=2.40=h12ee557_0
11
+ - libffi=3.4.4=h6a678d5_1
12
+ - libgcc-ng=11.2.0=h1234567_1
13
+ - libgomp=11.2.0=h1234567_1
14
+ - libstdcxx-ng=11.2.0=h1234567_1
15
+ - libuuid=1.41.5=h5eee18b_0
16
+ - ncurses=6.4=h6a678d5_0
17
+ - openssl=1.1.1w=h7f8727e_0
18
+ - pip=25.1=pyhc872135_2
19
+ - python=3.11.0=h7a1cb2a_3
20
+ - readline=8.2=h5eee18b_0
21
+ - setuptools=78.1.1=py311h06a4308_0
22
+ - sqlite=3.45.3=h5eee18b_0
23
+ - tk=8.6.14=h39e8969_0
24
+ - wheel=0.45.1=py311h06a4308_0
25
+ - xz=5.6.4=h5eee18b_1
26
+ - zlib=1.2.13=h5eee18b_1
27
+ - pip:
28
+ - accelerate==1.7.0
29
+ - aiohappyeyeballs==2.6.1
30
+ - aiohttp==3.11.18
31
+ - aiosignal==1.3.2
32
+ - annotated-types==0.7.0
33
+ - antlr4-python3-runtime==4.9.3
34
+ - attrs==25.3.0
35
+ - black==25.1.0
36
+ - certifi==2025.4.26
37
+ - charset-normalizer==3.4.2
38
+ - click==8.2.0
39
+ - configmypy==0.2.0
40
+ - contourpy==1.3.2
41
+ - crc32c==2.7.1
42
+ - cycler==0.12.1
43
+ - decorator==5.2.1
44
+ - docker-pycreds==0.4.0
45
+ - donfig==0.8.1.post1
46
+ - filelock==3.18.0
47
+ - fonttools==4.58.0
48
+ - frozenlist==1.6.0
49
+ - fsspec==2025.3.2
50
+ - gitdb==4.0.12
51
+ - gitpython==3.1.44
52
+ - h5py==3.13.0
53
+ - huggingface-hub==0.31.2
54
+ - hydra-core==1.3.2
55
+ - idna==3.10
56
+ - imageio==2.37.0
57
+ - imageio-ffmpeg==0.6.0
58
+ - iniconfig==2.1.0
59
+ - jaxtyping==0.3.2
60
+ - jinja2==3.1.6
61
+ - kiwisolver==1.4.8
62
+ - lightning==2.5.1.post0
63
+ - lightning-utilities==0.14.3
64
+ - markupsafe==3.0.2
65
+ - matplotlib==3.10.3
66
+ - moviepy==2.1.2
67
+ - mpmath==1.3.0
68
+ - multidict==6.4.3
69
+ - mypy-extensions==1.1.0
70
+ - narwhals==1.39.1
71
+ - networkx==3.4.2
72
+ - neuraloperator==0.3.0
73
+ - numcodecs==0.16.0
74
+ - numpy==2.2.5
75
+ - nvidia-cublas-cu12==12.6.4.1
76
+ - nvidia-cuda-cupti-cu12==12.6.80
77
+ - nvidia-cuda-nvrtc-cu12==12.6.77
78
+ - nvidia-cuda-runtime-cu12==12.6.77
79
+ - nvidia-cudnn-cu12==9.5.1.17
80
+ - nvidia-cufft-cu12==11.3.0.4
81
+ - nvidia-cufile-cu12==1.11.1.6
82
+ - nvidia-curand-cu12==10.3.7.77
83
+ - nvidia-cusolver-cu12==11.7.1.2
84
+ - nvidia-cusparse-cu12==12.5.4.2
85
+ - nvidia-cusparselt-cu12==0.6.3
86
+ - nvidia-nccl-cu12==2.26.2
87
+ - nvidia-nvjitlink-cu12==12.6.85
88
+ - nvidia-nvtx-cu12==12.6.77
89
+ - omegaconf==2.3.0
90
+ - opt-einsum==3.4.0
91
+ - packaging==24.2
92
+ - pandas==2.2.3
93
+ - pathspec==0.12.1
94
+ - pillow==10.4.0
95
+ - platformdirs==4.3.8
96
+ - plotly==6.1.0
97
+ - pluggy==1.6.0
98
+ - proglog==0.1.12
99
+ - propcache==0.3.1
100
+ - protobuf==6.31.0
101
+ - psutil==7.0.0
102
+ - pydantic==2.11.4
103
+ - pydantic-core==2.33.2
104
+ - pyparsing==3.2.3
105
+ - pytest==8.3.5
106
+ - pytest-mock==3.14.0
107
+ - python-dateutil==2.9.0.post0
108
+ - python-dotenv==1.1.0
109
+ - pytorch-lightning==2.5.1.post0
110
+ - pytz==2025.2
111
+ - pyyaml==6.0.2
112
+ - regex==2024.11.6
113
+ - requests==2.32.3
114
+ - ruamel-yaml==0.18.10
115
+ - ruamel-yaml-clib==0.2.12
116
+ - safetensors==0.5.3
117
+ - scipy==1.15.3
118
+ - scoringrules==0.7.1
119
+ - scot==1.0.0
120
+ - sentry-sdk==2.28.0
121
+ - setproctitle==1.3.6
122
+ - six==1.17.0
123
+ - smmap==5.0.2
124
+ - sympy==1.14.0
125
+ - tensorly==0.9.0
126
+ - tensorly-torch==0.5.0
127
+ - tokenizers==0.21.1
128
+ - torch==2.7.0
129
+ - torch-harmonics==0.7.3
130
+ - torchmetrics==1.7.1
131
+ - torchvision==0.22.0
132
+ - tqdm==4.67.1
133
+ - transformers==4.51.3
134
+ - triton==3.3.0
135
+ - typeguard==2.13.3
136
+ - typing-extensions==4.13.2
137
+ - typing-inspection==0.4.0
138
+ - tzdata==2025.2
139
+ - urllib3==2.4.0
140
+ - wadler-lindig==0.1.6
141
+ - wandb==0.19.11
142
+ - yarl==1.20.0
143
+ - zarr==3.0.7
144
+ prefix: /home/divyam123/miniconda3/envs/inv-env-tmp
fluid_stats.py ADDED
@@ -0,0 +1,418 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Compute energy spectra from vorticity field data.
4
+
5
+ This script loads vorticity trajectory data from a .npy file and computes
6
+ the azimuthally averaged energy spectrum E(k). It outputs both the spectrum
7
+ data as a .npz file and a visualization plot as a .png file.
8
+
9
+ To run direct numerical simulations and get fluid fields, please use Jax-CFD: https://github.com/google/jax-cfd
10
+ Commit hash we used: 0c17e3855702f884265b97bd6ff0793c34f3155e
11
+
12
+ Usage:
13
+ uv run python fluid_stats.py path/to/vorticity.npy --out_dir results/
14
+ """
15
+
16
+ import argparse
17
+ import logging
18
+ import os
19
+ from functools import partial
20
+
21
+ import jax
22
+ import jax.numpy as jnp
23
+ import matplotlib.pyplot as plt
24
+ import numpy as np
25
+ from jax import jit, vmap
26
+ from tqdm import tqdm
27
+
28
+ # Configure logging
29
+ logging.basicConfig(
30
+ level=logging.INFO,
31
+ format="%(asctime)s - %(levelname)s - %(message)s",
32
+ datefmt="%Y-%m-%d %H:%M:%S",
33
+ )
34
+ logger = logging.getLogger(__name__)
35
+
36
+
37
+ # =============================================================================
38
+ # Core computation functions
39
+ # =============================================================================
40
+
41
+
42
+ @jit
43
+ def vorticity_to_velocity(vorticity):
44
+ """
45
+ Convert vorticity to velocity components using the streamfunction.
46
+
47
+ Solves the Poisson equation in Fourier space: psi_hat = -vorticity_hat / k^2
48
+ Then computes velocity from streamfunction: u_x = -d(psi)/dy, u_y = d(psi)/dx
49
+
50
+ Parameters
51
+ ----------
52
+ vorticity : jnp.ndarray, shape (X, Y)
53
+ 2D vorticity field on a square grid.
54
+
55
+ Returns
56
+ -------
57
+ u_x : jnp.ndarray, shape (X, Y)
58
+ x-component of velocity.
59
+ u_y : jnp.ndarray, shape (X, Y)
60
+ y-component of velocity.
61
+ """
62
+ N = vorticity.shape[0]
63
+
64
+ # Compute streamfunction from vorticity using Poisson equation
65
+ # In Fourier space: psi_hat = -vorticity_hat / k^2
66
+ vort_hat = jnp.fft.fft2(vorticity)
67
+
68
+ # Create wavenumber arrays
69
+ kx = jnp.fft.fftfreq(N, d=1.0) * 2 * jnp.pi
70
+ ky = jnp.fft.fftfreq(N, d=1.0) * 2 * jnp.pi
71
+ KX, KY = jnp.meshgrid(kx, ky, indexing="ij")
72
+ K2 = KX**2 + KY**2
73
+
74
+ # Avoid division by zero at k=0
75
+ K2 = K2.at[0, 0].set(1.0)
76
+ psi_hat = -vort_hat / K2
77
+ psi_hat = psi_hat.at[0, 0].set(0.0) # Set mean streamfunction to zero
78
+
79
+ # Compute velocity components from streamfunction
80
+ # u_x = -d(psi)/dy, u_y = d(psi)/dx
81
+ u_x_hat = -1j * KY * psi_hat
82
+ u_y_hat = 1j * KX * psi_hat
83
+
84
+ u_x = jnp.real(jnp.fft.ifft2(u_x_hat))
85
+ u_y = jnp.real(jnp.fft.ifft2(u_y_hat))
86
+
87
+ return u_x, u_y
88
+
89
+
90
+ @partial(jit, static_argnames=["k_max"])
91
+ def energy_spectrum_single(u_x, u_y, k_max=None):
92
+ """
93
+ Compute azimuthally averaged energy spectrum E(k) for a single velocity field.
94
+
95
+ The energy spectrum is computed by binning the 2D Fourier-transformed
96
+ velocity field by wavenumber magnitude |k|.
97
+
98
+ Parameters
99
+ ----------
100
+ u_x : jnp.ndarray, shape (X, Y)
101
+ x-component of velocity.
102
+ u_y : jnp.ndarray, shape (X, Y)
103
+ y-component of velocity.
104
+ k_max : int, optional
105
+ Maximum wavenumber to compute. If None, uses N//3 (2/3 dealiasing rule).
106
+
107
+ Returns
108
+ -------
109
+ E : jnp.ndarray, shape (k_max+1,)
110
+ Energy spectrum E(k) for k = 0, 1, ..., k_max.
111
+ """
112
+ N = u_x.shape[0]
113
+
114
+ # FFT, shifted so k=0 is at centre
115
+ Ux = jnp.fft.fftshift(jnp.fft.fft2(u_x))
116
+ Ux = Ux / (N**2)
117
+ Uy = jnp.fft.fftshift(jnp.fft.fft2(u_y))
118
+ Uy = Uy / (N**2)
119
+
120
+ # Integer wave numbers
121
+ kx = jnp.fft.fftshift(jnp.fft.fftfreq(N)) * N
122
+ ky = kx
123
+ KX, KY = jnp.meshgrid(kx, ky)
124
+ K = jnp.hypot(KX, KY).astype(jnp.int32)
125
+
126
+ if k_max is None: # Nyquist under 2/3 de-alias
127
+ k_max = N // 3
128
+
129
+ # Vectorized computation of energy spectrum
130
+ def compute_E_k(k):
131
+ mask = K == k
132
+ return 0.5 * jnp.sum(jnp.abs(Ux) ** 2 * mask + jnp.abs(Uy) ** 2 * mask)
133
+
134
+ k_vals = jnp.arange(k_max + 1)
135
+ E = vmap(compute_E_k)(k_vals)
136
+
137
+ return E
138
+
139
+
140
+ @partial(jit, static_argnames=["k_max"])
141
+ def energy_spectrum_from_vorticity(vorticity, k_max=None):
142
+ """
143
+ Compute energy spectrum from vorticity field using vmap.
144
+
145
+ Suitable for moderate resolution fields (up to ~1024x1024).
146
+ For larger resolutions, use energy_spectrum_from_vorticity_lax_map.
147
+
148
+ Parameters
149
+ ----------
150
+ vorticity : jnp.ndarray, shape (T, X, Y)
151
+ Vorticity field over T time steps on an X x Y grid.
152
+ k_max : int, optional
153
+ Maximum wavenumber. If None, uses N//3 (2/3 dealiasing rule).
154
+
155
+ Returns
156
+ -------
157
+ E : jnp.ndarray, shape (T, k_max+1)
158
+ Energy spectrum for each time step.
159
+ """
160
+ N = vorticity.shape[1]
161
+
162
+ if k_max is None:
163
+ k_max = N // 3
164
+
165
+ def process_timestep(vort_t):
166
+ u_x, u_y = vorticity_to_velocity(vort_t)
167
+ return energy_spectrum_single(u_x, u_y, k_max)
168
+
169
+ # Vectorize over time dimension
170
+ E = vmap(process_timestep)(vorticity)
171
+
172
+ return E
173
+
174
+
175
+ @partial(jit, static_argnames=["k_max", "batch_size"])
176
+ def energy_spectrum_from_vorticity_lax_map(vorticity, k_max=None, batch_size=16):
177
+ """
178
+ Compute energy spectrum from vorticity field using jax.lax.map.
179
+
180
+ Memory-efficient version suitable for high resolution fields (>1024x1024).
181
+ Processes timesteps sequentially to reduce memory footprint.
182
+
183
+ Parameters
184
+ ----------
185
+ vorticity : jnp.ndarray, shape (T, X, Y)
186
+ Vorticity field over T time steps on an X x Y grid.
187
+ k_max : int, optional
188
+ Maximum wavenumber. If None, uses N//3 (2/3 dealiasing rule).
189
+ batch_size : int, optional
190
+ Batch size for lax.map processing. Default is 16.
191
+
192
+ Returns
193
+ -------
194
+ E : jnp.ndarray, shape (T, k_max+1)
195
+ Energy spectrum for each time step.
196
+ """
197
+ N = vorticity.shape[1]
198
+
199
+ if k_max is None:
200
+ k_max = N // 3
201
+
202
+ def process_timestep(vort_t):
203
+ u_x, u_y = vorticity_to_velocity(vort_t)
204
+ return energy_spectrum_single(u_x, u_y, k_max)
205
+
206
+ # Use lax.map instead of vmap for memory efficiency
207
+ E = jax.lax.map(process_timestep, vorticity, batch_size=batch_size)
208
+
209
+ return E
210
+
211
+
212
+ # =============================================================================
213
+ # Main script
214
+ # =============================================================================
215
+
216
+
217
+ def parse_args():
218
+ """Parse command line arguments."""
219
+ parser = argparse.ArgumentParser(
220
+ description=(
221
+ "Compute energy spectra from 2D vorticity trajectory data. "
222
+ "Loads vorticity fields from a .npy file, computes the azimuthally "
223
+ "averaged energy spectrum E(k), and saves both the spectrum data "
224
+ "and a visualization plot."
225
+ ),
226
+ formatter_class=argparse.RawDescriptionHelpFormatter,
227
+ epilog="""
228
+ Examples:
229
+ uv run python fluid_stats.py simulation.npy
230
+ uv run python fluid_stats.py data/vorticity.npy --out_dir results/
231
+
232
+ Input format:
233
+ The input .npy file should contain a 4D array with shape (batch, time, X, Y)
234
+ where batch is the number of independent trajectories, time is the number
235
+ of snapshots, and X, Y are the spatial grid dimensions.
236
+ """,
237
+ )
238
+
239
+ parser.add_argument(
240
+ "input_file",
241
+ type=str,
242
+ help=(
243
+ "Path to the input .npy file containing vorticity data. "
244
+ "Expected shape: (batch, time, X, Y) where X and Y are the "
245
+ "spatial grid dimensions (must be square, i.e., X == Y)."
246
+ ),
247
+ )
248
+
249
+ parser.add_argument(
250
+ "--out_dir",
251
+ type=str,
252
+ default=".",
253
+ help=(
254
+ "Directory to save output files. Will be created if it does not "
255
+ "exist. Output files are named based on the input filename. "
256
+ "Default: current directory."
257
+ ),
258
+ )
259
+
260
+ return parser.parse_args()
261
+
262
+
263
+ def main():
264
+ """Main entry point for energy spectrum computation."""
265
+ args = parse_args()
266
+
267
+ # Setup
268
+ logger.info("JAX devices: %s", jax.devices())
269
+
270
+ # Validate input file
271
+ if not os.path.exists(args.input_file):
272
+ logger.error("Input file not found: %s", args.input_file)
273
+ raise FileNotFoundError(f"Input file not found: {args.input_file}")
274
+
275
+ if not args.input_file.endswith(".npy"):
276
+ logger.warning(
277
+ "Input file does not have .npy extension: %s", args.input_file
278
+ )
279
+
280
+ # Create output directory
281
+ os.makedirs(args.out_dir, exist_ok=True)
282
+
283
+ # Generate output filenames from input filename
284
+ input_basename = os.path.splitext(os.path.basename(args.input_file))[0]
285
+ data_filename = f"{input_basename}_spectrum_data.npz"
286
+ plot_filename = f"{input_basename}_spectrum.png"
287
+ data_path = os.path.join(args.out_dir, data_filename)
288
+ plot_path = os.path.join(args.out_dir, plot_filename)
289
+
290
+ # Load data
291
+ logger.info("Loading data from: %s", args.input_file)
292
+ field = np.load(args.input_file)
293
+ logger.info("Loaded field with shape: %s", field.shape)
294
+
295
+ # Validate shape
296
+ if field.ndim != 4:
297
+ logger.error(
298
+ "Expected 4D array (batch, time, X, Y), got %dD array", field.ndim
299
+ )
300
+ raise ValueError(
301
+ f"Expected 4D array (batch, time, X, Y), got {field.ndim}D array"
302
+ )
303
+
304
+ batch_size, time_steps, height, width = field.shape
305
+ if height != width:
306
+ logger.error(
307
+ "Expected square spatial grid (X == Y), got %d x %d", height, width
308
+ )
309
+ raise ValueError(
310
+ f"Expected square spatial grid (X == Y), got {height} x {width}"
311
+ )
312
+
313
+ resolution = height
314
+ k_max = resolution // 3
315
+ logger.info(
316
+ "Processing %d trajectories with %d timesteps at %dx%d resolution",
317
+ batch_size,
318
+ time_steps,
319
+ resolution,
320
+ resolution,
321
+ )
322
+ logger.info("Maximum wavenumber (k_max): %d", k_max)
323
+
324
+ # Compute energy spectrum
325
+ logger.info("Computing energy spectra...")
326
+ spectra_list = []
327
+
328
+ for i in tqdm(range(batch_size), desc="Computing spectra"):
329
+ if resolution > 1024:
330
+ # Use memory-efficient lax.map for large resolutions
331
+ single_spectrum = energy_spectrum_from_vorticity_lax_map(
332
+ field[i], k_max
333
+ )
334
+ else:
335
+ # Use vmap for moderate resolutions
336
+ single_spectrum = energy_spectrum_from_vorticity(field[i], k_max)
337
+ spectra_list.append(single_spectrum)
338
+
339
+ # Stack all spectra
340
+ all_spectra = jnp.stack(spectra_list)
341
+ logger.info("All spectra shape: %s", all_spectra.shape)
342
+
343
+ # Compute mean spectrum (over batch and time)
344
+ mean_spectrum = all_spectra.reshape(-1, all_spectra.shape[-1]).mean(axis=0)
345
+ logger.info("Mean spectrum shape: %s", mean_spectrum.shape)
346
+
347
+ # Save spectrum data
348
+ logger.info("Saving spectrum data to: %s", data_path)
349
+ np.savez_compressed(
350
+ data_path,
351
+ mean_spectrum=np.array(mean_spectrum),
352
+ all_spectra=np.array(all_spectra),
353
+ k_values=np.arange(len(mean_spectrum)),
354
+ resolution=resolution,
355
+ batch_size=batch_size,
356
+ time_steps=time_steps,
357
+ )
358
+
359
+ # Generate plot
360
+ logger.info("Generating energy spectrum plot...")
361
+ plt.figure(figsize=(10, 6))
362
+
363
+ # Plot mean spectrum (skip k=0)
364
+ offset = 1
365
+ spectrum = mean_spectrum[offset:]
366
+ k_values = np.arange(offset, len(mean_spectrum))
367
+ plt.loglog(k_values, spectrum, "b-", linewidth=2, label="Mean spectrum")
368
+
369
+ # Add k^{-5/3} reference line (Kolmogorov scaling; the inverse energy cascade in 2D)
370
+ # and k^{-3} reference line (direct enstrophy cascade in 2D turbulence)
371
+ k_match = min(10, len(spectrum) // 3)
372
+ if k_match > 0:
373
+ ref_value = float(spectrum[k_match - 1])
374
+
375
+ # k^{-3} line (2D enstrophy cascade)
376
+ scaling_k3 = ref_value * (k_match**3)
377
+ k_theory = np.logspace(0, np.log10(len(mean_spectrum)), 100)
378
+ power_law_k3 = scaling_k3 * k_theory ** (-3)
379
+ plt.loglog(
380
+ k_theory,
381
+ power_law_k3,
382
+ "k--",
383
+ alpha=0.7,
384
+ linewidth=1.5,
385
+ label=r"$k^{-3}$ (enstrophy cascade)",
386
+ )
387
+
388
+ # k^{-5/3} line (inverse energy cascade)
389
+ scaling_k53 = ref_value * (k_match ** (5 / 3))
390
+ power_law_k53 = scaling_k53 * k_theory ** (-5 / 3)
391
+ plt.loglog(
392
+ k_theory,
393
+ power_law_k53,
394
+ "r--",
395
+ alpha=0.7,
396
+ linewidth=1.5,
397
+ label=r"$k^{-5/3}$ (energy cascade)",
398
+ )
399
+
400
+ plt.xlabel("Wavenumber k", fontsize=12)
401
+ plt.ylabel("Energy Spectrum E(k)", fontsize=12)
402
+ plt.title(f"Energy Spectrum ({resolution}x{resolution} resolution)", fontsize=14)
403
+ plt.legend()
404
+ plt.grid(True, alpha=0.3)
405
+ xlim = plt.xlim()
406
+ plt.xlim(1, xlim[1])
407
+ plt.tight_layout()
408
+
409
+ # Save plot
410
+ plt.savefig(plot_path, dpi=300, bbox_inches="tight")
411
+ plt.close()
412
+ logger.info("Plot saved to: %s", plot_path)
413
+
414
+ logger.info("Done!")
415
+
416
+
417
+ if __name__ == "__main__":
418
+ main()
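
The spectrum utilities above can also be exercised directly on an in-memory array as a quick sanity check before pointing the CLI at real trajectories. A minimal sketch, assuming `fluid_stats.py` is importable from the working directory; the toy field shape and random values are illustrative only:

```python
import jax.numpy as jnp
import numpy as np

from fluid_stats import energy_spectrum_from_vorticity  # assumed import path

# Toy vorticity field: 8 snapshots on a 128x128 grid (purely illustrative).
vorticity = jnp.asarray(np.random.randn(8, 128, 128))

# k_max defaults to N // 3 = 42 under the 2/3 de-aliasing rule.
spectra = energy_spectrum_from_vorticity(vorticity)

print(spectra.shape)  # (8, 43): one azimuthally averaged spectrum per snapshot
```

For grids larger than 1024x1024, `energy_spectrum_from_vorticity_lax_map` is the memory-friendlier alternative, as the main script above already selects.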
huggingface_pdeinv_download.py ADDED
@@ -0,0 +1,60 @@
1
+ import argparse
2
+ from huggingface_hub import snapshot_download
3
+
4
+ datasets = [
5
+ "darcy-flow-241",
6
+ "darcy-flow-421",
7
+ "korteweg-de-vries-1d",
8
+ "navier-stokes-forced-2d-2048",
9
+ "navier-stokes-forced-2d",
10
+ "navier-stokes-unforced-2d",
11
+ "reaction-diffusion-2d-du-512",
12
+ "reaction-diffusion-2d-du",
13
+ "reaction-diffusion-2d-k-512",
14
+ "reaction-diffusion-2d-k",
15
+ ]
16
+
17
+ splits = [
18
+ "*",
19
+ "train",
20
+ "validation",
21
+ "test",
22
+ "out_of_distribution",
23
+ "out_of_distribution_extreme",
24
+ ]
25
+
26
+
27
+ def main():
28
+ parser = argparse.ArgumentParser(
29
+ description="Download PDE Inverse Problem Benchmarking datasets"
30
+ )
31
+ parser.add_argument(
32
+ "--dataset",
33
+ type=str,
34
+ default="darcy-flow-241",
35
+ choices=datasets,
36
+ help="Dataset to download",
37
+ )
38
+ parser.add_argument(
39
+ "--split", type=str, default="*", choices=splits, help="Data split to download"
40
+ )
41
+ parser.add_argument(
42
+ "--local-dir", type=str, default="", help="Local directory to save data"
43
+ )
44
+
45
+ args = parser.parse_args()
46
+
47
+ data_bucket = "DabbyOWL/PDE_Inverse_Problem_Benchmarking"
48
+
49
+ print(f"Downloading {args.dataset}/{args.split} to {args.local_dir}")
50
+
51
+ snapshot_download(
52
+ data_bucket,
53
+ allow_patterns=[f"{args.dataset}/{args.split}/*"],
54
+ local_dir=args.local_dir,
55
+ repo_type="dataset",
56
+ )
57
+
58
+
59
+ if __name__ == "__main__":
60
+ main()
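
For scripted use, the same download can be performed without the argparse wrapper by calling `snapshot_download` directly; a minimal sketch, where the `data` target directory is an arbitrary assumed path:

```python
from huggingface_hub import snapshot_download

# Fetch only the training split of a single benchmark dataset.
snapshot_download(
    "DabbyOWL/PDE_Inverse_Problem_Benchmarking",
    allow_patterns=["darcy-flow-241/train/*"],
    local_dir="data",  # assumed local target directory
    repo_type="dataset",
)
```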
pdeinvbench/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from pdeinvbench import data
2
+ from pdeinvbench import lightning_modules
3
+ from pdeinvbench import losses
4
+ from pdeinvbench import losses
5
+ from pdeinvbench import utils
pdeinvbench/data/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from pdeinvbench.data.dataset import PDE_MultiParam
pdeinvbench/data/dataset.py ADDED
@@ -0,0 +1,360 @@
1
+ import glob
2
+ import logging
3
+ import math
4
+
5
+ import h5py
6
+ import numpy as np
7
+ import torch
8
+ from scipy import signal
9
+ from torch.utils.data import Dataset
10
+
11
+ from pdeinvbench.data.transforms import collapse_time_and_channels_torch_transform
12
+ from pdeinvbench.data.utils import extract_params_from_path
13
+ from pdeinvbench.utils.types import PDE, PDE_NUM_SPATIAL, PDE_TRAJ_LEN
14
+
15
+
16
+ class PDE_MultiParam(Dataset):
17
+ """Data Loader that loads the multiple parameter version of PDE Datasets."""
18
+
19
+ def __init__(
20
+ self,
21
+ data_root: str,
22
+ pde: PDE,
23
+ n_past: int,
24
+ dilation: int,
25
+ cutoff_first_n_frames: int,
26
+ train: bool,
27
+ frac_param_combinations: float = 1,
28
+ frac_ics_per_param: float = 1,
29
+ random_sample_param: bool = True,
30
+ downsample_factor: int = 0,
31
+ every_nth_window: int = 1,
32
+ window_start_percent: float = 0.0,
33
+ window_end_percent: float = 1.0,
34
+ ):
35
+ """
36
+ Args:
37
+ data_root: path containing the h5 files for the current data split
38
+ pde: name of the PDE system - one of the enum values.
39
+ n_past: number of conditioning frames
40
+ dilation: frequency at which to subsample the ground truth trajectories in the time dimension
41
+ cutoff_first_n_frames: number of initial frames to cut off in each trajectory (may want to do this e.g. if initial PDE residuals are very high)
42
+ train: if True (training dataloader), time windows are randomly sampled from each trajectory; if False, we loop through all non-overlapping windows
43
+ frac_param_combinations: fraction of parameter combinations to use; 1 keeps all parameters, while a value in (0, 1) keeps roughly that fraction of them
44
+ frac_ics_per_param: fraction of initial conditions per parameter combination to keep.
45
+ random_sample_param: (bool) if frac_param_combinations < 1, True randomly samples the kept parameter combinations and False keeps the first ones. Defaults to True.
46
+ downsample_factor: spatially downsample the solution field by this factor, e.g. downsample_factor=4 reduces a [128, 128] field to [32, 32]
47
+ every_nth_window: take every nth window from the list of non-overlapping windows
48
+ window_start_percent: fraction (in [0, 1]) of the trajectory, after cutoff_first_n_frames, at which to start sampling windows
49
+ window_end_percent: fraction (in [0, 1]) of the trajectory at which to stop sampling windows
50
+ """
51
+
52
+ self.data_root = data_root
53
+ self.pde = pde
54
+ self.n_past = n_past
55
+ self.dilation = dilation
56
+ self.cutoff_first_n_frames = cutoff_first_n_frames
57
+ self.frac_param_combinations = frac_param_combinations
58
+ self.frac_ics_per_param = frac_ics_per_param
59
+ self.random_sample_param = random_sample_param
60
+ self.train = train
61
+ self.every_nth_window = every_nth_window
62
+ assert (
63
+ window_start_percent < window_end_percent
64
+ ), "window_start_percent must be less than window_end_percent"
65
+ self.window_start_index = int(
66
+ (PDE_TRAJ_LEN[self.pde] - self.cutoff_first_n_frames) * window_start_percent
67
+ + self.cutoff_first_n_frames
68
+ )
69
+ self.window_end_index = int(
70
+ (PDE_TRAJ_LEN[self.pde] - self.cutoff_first_n_frames) * window_end_percent
71
+ + self.cutoff_first_n_frames
72
+ )
73
+ self.total_trajectory_length = self.window_end_index - self.window_start_index
74
+
75
+ if self.train:
76
+ self.num_windows = self.total_trajectory_length - self.n_past - 1
77
+ else:
78
+ self.num_windows = (self.total_trajectory_length) // (
79
+ (self.n_past) * self.every_nth_window
80
+ )
81
+
82
+ if self.num_windows == 0 and self.every_nth_window > 1:
83
+ self.every_nth_window = 1
84
+ self.num_windows = (self.total_trajectory_length) // ((self.n_past))
85
+
86
+ # Darcy Flow has no time dimension, so force a single-window (non-autoregressive) dataloader
87
+ if self.pde == PDE.DarcyFlow2D:
88
+ self.num_windows = 1
89
+
90
+ self.downsample_factor = downsample_factor
91
+
92
+ if PDE_NUM_SPATIAL[pde] == 2:
93
+ self.transforms = [collapse_time_and_channels_torch_transform]
94
+ else:
95
+ self.transforms = None
96
+
97
+ # get all h5 paths in the root folder and read them
98
+ # each h5 path represents a set of trajectories with a different PDE parameter
99
+ self.h5_paths = glob.glob(f"{self.data_root}/*.h5")
100
+ if len(self.h5_paths) == 0:
101
+ self.h5_paths = glob.glob(f"{self.data_root}/*.hdf5")
102
+ if self.pde == PDE.DarcyFlow2D:
103
+ self.h5_files = [file for file in self.h5_paths]
104
+ else:
105
+ self.h5_files = [h5py.File(file, "r") for file in self.h5_paths]
106
+
107
+ # extract the individual trajectories from each h5 file
108
+ if self.pde == PDE.ReactionDiffusion2D or self.pde == PDE.TurbulentFlow2D:
109
+ self.seqs = [list(h5_file.keys()) for h5_file in self.h5_files]
110
+ elif self.pde == PDE.NavierStokes2D:
111
+ # The individual trajectories are stored in key: 'solutions'
112
+ self.seqs = [h5_file["solutions"] for h5_file in self.h5_files]
113
+ elif self.pde == PDE.KortewegDeVries1D:
114
+ self.seqs = [h5_file["tensor"] for h5_file in self.h5_files]
115
+ elif self.pde == PDE.DarcyFlow2D:
116
+ # Keeping every file open can exceed the OS open-file limit (errno 24), so Darcy Flow files are opened lazily in __getitem__
117
+ self.seqs = [file for file in self.h5_paths]
118
+ else:
119
+ self.seqs = [h5py.File(file, "r") for file in self.h5_paths]
120
+ if self.frac_param_combinations < 1:
121
+ total_params = math.ceil(len(self.seqs) * self.frac_param_combinations)
122
+
123
+ logging.info(
124
+ f"trimming dataset from length {len(self.seqs)} to {total_params}"
125
+ )
126
+ if self.random_sample_param:
127
+ # Just a quick sanity check to ensure that all of the variables are the same length
128
+ # If this fails, something has gone VERY wrong
129
+ assert len(self.seqs) == len(self.h5_paths) and len(
130
+ self.h5_paths
131
+ ) == len(
132
+ self.h5_files
133
+ ), f"The dataloader variables are mismatched. seqs = {len(self.seqs)}, h5_paths = {len(self.h5_paths)}, h5_files = {len(self.h5_files)}"
134
+
135
+ # We've had issues in the past with reproducibility so this forces a seed
136
+ # Also will keep the datasets the same regardless of the training and weight init seeds
137
+ num_sequences: int = len(self.seqs)
138
+ requested_dataset_size: int = int(
139
+ num_sequences * self.frac_param_combinations
140
+ )
141
+ indices = np.arange(num_sequences)
142
+ sample_seed: int = 42
143
+ rng_generator = np.random.default_rng(seed=sample_seed)
144
+ sampled_indices = rng_generator.choice(
145
+ indices, size=requested_dataset_size, replace=False
146
+ )
147
+ logging.info(
148
+ f"Using random sampling to trim the dataset down from length {len(self.seqs)} to {requested_dataset_size}"
149
+ )
150
+ assert (
151
+ len(set(sampled_indices.tolist())) == sampled_indices.shape[0]
152
+ ), f"Duplicate items in random sampling of PDE parameters!"
153
+ assert (
154
+ sampled_indices.shape[0] == requested_dataset_size
155
+ ), f"Mismatch between the requested dataset sample size and the new sampled dataset. frac requested = {self.frac_param_combinations}, requested size = {requested_dataset_size}, new size = {sampled_indices.shape[0]}"
156
+ self.seqs = [self.seqs[i] for i in sampled_indices]
157
+ self.h5_paths = [self.h5_paths[i] for i in sampled_indices]
158
+ self.h5_files = [self.h5_files[i] for i in sampled_indices]
159
+ else:
160
+ self.seqs = self.seqs[:total_params]
161
+ self.h5_paths = self.h5_paths[:total_params]
162
+ self.h5_files = self.h5_files[:total_params]
163
+
164
+ self.num_params = len(self.seqs)
165
+ if self.pde == PDE.KortewegDeVries1D:
166
+ # Since it follows the same format as 1D reaction diffusion
167
+ self.num_ics_per_param = self.seqs[0].shape[0]
168
+ elif self.pde == PDE.DarcyFlow2D:
169
+ self.num_ics_per_param = 1 # Each param only has one IC
170
+ elif self.pde != PDE.NavierStokes2D:
171
+ self.num_ics_per_param = min(
172
+ len(self.seqs[i]) for i in range(len(self.seqs))
173
+ ) # take the smallest count to handle an uneven number of ICs per param
174
+ else:
175
+ self.num_ics_per_param = min(
176
+ [self.seqs[i].shape[0] for i in range(len(self.seqs))]
177
+ )
178
+
179
+ # Trim the number of ICs per parameter
180
+
181
+ self.num_ics_per_param = math.ceil(
182
+ self.num_ics_per_param * self.frac_ics_per_param
183
+ )
184
+ # We also need to save the dx, dt, dy information in order to compute the PDE residual
185
+ if pde == PDE.ReactionDiffusion2D or pde == PDE.TurbulentFlow2D:
186
+ self.x = self.h5_files[0]["0001"]["grid"]["x"][:]
187
+ self.y = self.h5_files[0]["0001"]["grid"]["y"][:]
188
+ self.t = torch.Tensor(self.h5_files[0]["0001"]["grid"]["t"][:])
189
+ elif pde == PDE.NavierStokes2D:
190
+ self.x = self.h5_files[0]["x-coordinate"][:]
191
+ self.y = self.h5_files[0]["y-coordinate"][:]
192
+ self.t = torch.Tensor(self.h5_files[0]["t-coordinate"][:])
193
+ elif pde == PDE.DarcyFlow2D:
194
+ # Not ideal, but it is fine to hard-code the grid coordinates for Darcy Flow here
195
+ domain_len = 1 # unit-length uniform grid, the same regardless of resolution
196
+ d = h5py.File(self.seqs[0], "r")
197
+ size, _, _ = d["sol"].shape
198
+ d.close()
199
+ x = np.linspace(0, domain_len, size, endpoint=False)
200
+ self.x = torch.from_numpy(x)
201
+ self.y = torch.from_numpy(x)
202
+ self.t = (
203
+ torch.ones(10, dtype=float) * -1
204
+ ) # Darcy flow is not time dependent, so we use -1
205
+ else:
206
+ # All of the 1D systems
207
+ self.y = None # There is no y component
208
+ self.x = self.h5_files[0]["x-coordinate"][:]
209
+ self.t = torch.Tensor(self.h5_files[0]["t-coordinate"][:])
210
+
211
+ if self.downsample_factor != 0:
212
+ self.y = (
213
+ None
214
+ if self.y is None
215
+ else signal.decimate(self.y, q=self.downsample_factor, axis=0).copy()
216
+ )
217
+ self.x = signal.decimate(self.x, q=self.downsample_factor, axis=0).copy()
218
+ self.x = torch.Tensor(self.x)
219
+ self.y = torch.Tensor(self.y) if self.y is not None else None
220
+
221
+ logging.info(
222
+ f"Initialized dataset with {self.num_params} parameter combinations"
223
+ )
224
+
225
+ def __len__(self):
226
+ """
227
+ Training: number of parameters * number of ICs per parameter (one window is sampled per trajectory). Validation/test: additionally multiplied by the number of non-overlapping windows per trajectory.
228
+ """
229
+ if self.train:
230
+ return self.num_params * self.num_ics_per_param
231
+ else:
232
+ return self.num_params * self.num_ics_per_param * self.num_windows
233
+
234
+ def __getitem__(self, index: int):
235
+ """
236
+ Loops over all parameters and ICs, and randomly samples time windows.
237
+ Returns:
238
+ x: conditioning frames, shape of [n_past, spatial/channel dims]
239
+ y: target frame(s), shape of [n_future, spatial/channel dims]
240
+ param_dict: dictionary containing the true PDE parameter for the trajectory.
241
+ """
242
+ # Compute the parameter and ic index for train loader
243
+ if self.train:
244
+ param_index = index // self.num_ics_per_param
245
+ ic_index = index % self.num_ics_per_param
246
+ else:
247
+ # Compute the parameter, ic index, and window index for validation/test loaders
248
+ # index is assumed to be in row major format of [num_params, num_ics_per_param, num_windows] dataset matrix organization
249
+ param_index = index // (self.num_ics_per_param * self.num_windows)
250
+ ic_index = (index // self.num_windows) % self.num_ics_per_param
251
+ window_index = index % self.num_windows
252
+ # get the corresponding trajectory and parameters
253
+ h5_file = self.h5_files[param_index]
254
+ h5_path = self.h5_paths[param_index]
255
+ param_dict = extract_params_from_path(h5_path, self.pde)
256
+
257
+ if self.pde == PDE.ReactionDiffusion2D or self.pde == PDE.TurbulentFlow2D:
258
+ # get data
259
+ seq = self.seqs[param_index][ic_index]
260
+ traj = torch.Tensor(
261
+ np.array(h5_file[f"{seq}/data"], dtype="f")
262
+ ) # dim = [seq_len, spatial_dim_1, spatial_dim_2, channels]
263
+ elif self.pde == PDE.NavierStokes2D:
264
+ seq = self.seqs[param_index]
265
+ traj = torch.Tensor(seq[ic_index])
266
+ # dim = [seq_len (t), spatial_dim_1, spatial_dim_2, channels]
267
+
268
+ elif self.pde == PDE.DarcyFlow2D:
269
+ # Unique since there is no time dim
270
+ # There is also only one ic per param
271
+ seq = h5py.File(self.seqs[param_index], "r")
272
+
273
+ coeff = torch.from_numpy(np.asarray(seq["coeff"]))
274
+ coeff = torch.squeeze(coeff)
275
+ coeff = torch.unsqueeze(coeff, dim=0) # Channel first repr
276
+ # We treat the coeff as a binary mask
277
+ min_val = coeff.min()
278
+ max_val = coeff.max()
279
+ # generate the binary mask
280
+ coeff = coeff - min_val
281
+ binary_mask = coeff > 0
282
+
283
+ def wrap_scalar(x):
284
+ return torch.Tensor([x.item()])
285
+
286
+ param_dict["coeff"] = binary_mask.float()
287
+ param_dict["max_val"] = wrap_scalar(max_val)
288
+ param_dict["min_val"] = wrap_scalar(min_val)
289
+ traj = torch.from_numpy(np.asarray(seq["sol"]))
290
+ seq.close()
291
+ else:
292
+ seq = self.seqs[param_index]
293
+ traj = torch.Tensor(np.array(h5_file["tensor"][ic_index]))
294
+ traj = traj[:: self.dilation] # subsample based on dilation
295
+
296
+ # sample a random window of length [n_past] from this trajectory
297
+ if traj.shape[0] - self.n_past == 0:
298
+ start = 0
299
+ # if n_past > 1, problem is well posed
300
+ if self.n_past == 1:
301
+ raise ValueError("Problem is ill-posed when n_past == 1. ")
302
+ else:
303
+ if self.train:
304
+ start = np.random.randint(
305
+ self.window_start_index,
306
+ self.window_end_index - self.n_past,
307
+ )
308
+ else:
309
+ # multiply with self.n_past to avoid overlapping in validation/test samples
310
+ start = self.window_start_index + (
311
+ window_index * (self.n_past) * self.every_nth_window
312
+ )
313
+
314
+ if self.pde != PDE.DarcyFlow2D:
315
+ traj = traj[start : start + self.n_past]
316
+ time_frames = self.t[start : start + self.n_past]
317
+ else:
318
+ time_frames = -1 * torch.ones(self.n_past, dtype=float)
319
+ # 2D systems
320
+ if len(traj.shape) == 4:
321
+ # [T, Channels, Spatial, Spatial]
322
+ traj = traj.permute((0, 3, 1, 2))
323
+
324
+ if self.downsample_factor != 0:
325
+ traj = signal.decimate(traj, q=self.downsample_factor, axis=-1)
326
+ traj = (
327
+ torch.Tensor(
328
+ signal.decimate(traj, q=self.downsample_factor, axis=-2).copy()
329
+ )
330
+ if len(traj.shape) == 4
331
+ else torch.Tensor(traj.copy())
332
+ )
333
+
334
+ # split into conditioning and target frames
335
+ if self.pde == PDE.DarcyFlow2D:
336
+ # Transforms to reshape the traj to the expected shape
337
+ # nx x ny x 1 -> T, C, X, Y
338
+ # T == C == 1
339
+ traj = torch.squeeze(traj)
340
+ traj = torch.unsqueeze(traj, dim=0)
341
+ traj = torch.unsqueeze(traj, dim=0)
342
+ x, y = (
343
+ traj,
344
+ traj,
345
+ )
346
+ x = x.float()
347
+ y = y.float()
348
+ else:
349
+ x, y = torch.split(traj, [self.n_past, 0], dim=0)
350
+
351
+ if self.transforms is not None:
352
+ # Perform any data transforms if specified
353
+ for T in self.transforms:
354
+ x, y, param_dict = T((x, y, param_dict))
355
+
356
+ # return spatial/temporal grid, frames and parameters
357
+ spatial_grid = (self.x, self.y) if self.y is not None else (self.x,)
358
+
359
+ ic_index = torch.tensor([ic_index], dtype=float)
360
+ return spatial_grid, self.t, x, y, time_frames, ic_index, param_dict
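
For orientation, here is a minimal sketch of wiring `PDE_MultiParam` into a PyTorch `DataLoader`. The data path, hyperparameter values, and batch size are illustrative assumptions; only the constructor signature and the 7-tuple returned by `__getitem__` come from the code above:

```python
from torch.utils.data import DataLoader

from pdeinvbench.data.dataset import PDE_MultiParam
from pdeinvbench.utils.types import PDE

# Assumed data layout: one directory of .h5 files per split.
dataset = PDE_MultiParam(
    data_root="data/korteweg-de-vries-1d/train",  # hypothetical path
    pde=PDE.KortewegDeVries1D,
    n_past=10,
    dilation=1,
    cutoff_first_n_frames=0,
    train=True,
)

loader = DataLoader(dataset, batch_size=4, shuffle=True)
spatial_grid, t, x, y, time_frames, ic_index, param_dict = next(iter(loader))
print(x.shape)  # conditioning frames: [batch, n_past, ...spatial dims]
```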
pdeinvbench/data/transforms.py ADDED
@@ -0,0 +1,80 @@
1
+ import pdb
2
+ from typing import Dict, Tuple
3
+
4
+ import jaxtyping
5
+ import torch
6
+ import typeguard
7
+ from jaxtyping import Float, jaxtyped
8
+
9
+ """
10
+ Set of utility functions for data transformations.
11
+ """
12
+
13
+
14
+ @jaxtyped(typechecker=typeguard.typechecked)
15
+ def collapse_time_and_channels(
16
+ x: Float[torch.Tensor, "time channel xspace yspace"],
17
+ ) -> Float[torch.Tensor, "time*channel xspace yspace"]:
18
+ """
19
+ Collapses the time and channel dimensions of a tensor into a single dimension.
20
+ NOTE: This is only applicable to 2D systems and this is NOT batched!
21
+ We do this to be compatible with FNO. FNO can't handle multiple function outputs
22
+ at once since we're already using the channel dimension to represent time.
23
+ :param x: Input tensor of shape (time, channel, xspace, yspace).
24
+ :return: Output tensor of shape (time*channel, xspace, yspace).
25
+ """
26
+ x_flattened = torch.flatten(x, start_dim=0, end_dim=1)
27
+ return x_flattened
28
+
29
+
30
+ @jaxtyped(typechecker=typeguard.typechecked)
31
+ def collapse_time_and_channels_torch_transform(
32
+ batch: Tuple[
33
+ Float[torch.Tensor, "time_n_past in_channels xspace yspace"],
34
+ Float[torch.Tensor, "time_n_fut out_channels xspace yspace"],
35
+ Dict[
36
+ str, Float[torch.Tensor, "param"] | Float[torch.Tensor, "xspace yspace 1"]
37
+ ],
38
+ ],
39
+ ) -> Tuple[
40
+ Float[torch.Tensor, "time_n_past*in_channels xspace yspace"],
41
+ Float[torch.Tensor, "time_n_fut*out_channels xspace yspace"],
42
+ Dict[str, Float[torch.Tensor, "param"] | Float[torch.Tensor, "xspace yspace 1"]],
43
+ ]:
44
+ """
45
+ Wrapper for ```collapse_time_and_channels``` to be used with PyTorch's dataloader transforms.
46
+ Accepts a batch and for the first two elements of the batch, collapses the time and channel dimensions.
47
+ :param batch: Tuple of (input, target, pde_params).
48
+ :return: Tuple of (input, target, pde_params)
49
+ """
50
+ input, target, pde_params = batch
51
+ input = collapse_time_and_channels(input)
52
+ target = collapse_time_and_channels(target)
53
+ return input, target, pde_params
54
+
55
+
56
+ @jaxtyped(typechecker=typeguard.typechecked)
57
+ def expand_time_and_channels(
58
+ x: Float[torch.Tensor, "timexchannel xspace yspace"],
59
+ num_channels: int = -1,
60
+ num_timesteps: int = -1,
61
+ ) -> Float[torch.Tensor, "time channel xspace yspace"]:
62
+ """
63
+ Expands the time and channel dimensions of a tensor into separate dimensions.
64
+ Either number of channels or number of timesteps must be specified.
65
+ NOTE: This is only applicable to 2D systems.
66
+ :param x: Input tensor of shape (time*channel, xspace, yspace).
67
+ :param num_channels: Number of channels to expand to. OPTIONAL if num_timesteps is specified.
68
+ :param num_timesteps: Number of timesteps to expand to. OPTIONAL if num_channels is specified.
69
+ :return: Output tensor of shape (time, channel, xspace, yspace).
70
+ """
71
+ assert (
72
+ num_channels != -1 or num_timesteps != -1
73
+ ), "Either num_channels or num_timesteps must be specified!"
74
+ if num_channels != -1:
75
+ # Case we infer the number of timesteps
76
+ x_unflattened = torch.unflatten(x, 0, (-1, num_channels))
77
+ else:
78
+ # Case we infer the number of channels
79
+ x_unflattened = torch.unflatten(x, 0, (num_timesteps, -1))
80
+ return x_unflattened
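
A quick round-trip check of the two reshaping helpers above, using an arbitrary dummy tensor (4 timesteps, 2 channels, 64x64 grid):

```python
import torch

from pdeinvbench.data.transforms import (
    collapse_time_and_channels,
    expand_time_and_channels,
)

x = torch.randn(4, 2, 64, 64)         # (time, channel, xspace, yspace)
flat = collapse_time_and_channels(x)  # -> (8, 64, 64)
restored = expand_time_and_channels(flat, num_channels=2)

assert restored.shape == x.shape
assert torch.equal(restored, x)  # flatten/unflatten round-trips losslessly
```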